diff --git a/.devcontainer/wordcount/devcontainer.json b/.devcontainer/wordcount/devcontainer.json
new file mode 100644
index 0000000000..c36e76993e
--- /dev/null
+++ b/.devcontainer/wordcount/devcontainer.json
@@ -0,0 +1,22 @@
+{
+    "name": "wordcount",
+    "image": "mcr.microsoft.com/devcontainers/python:3.13-bookworm",
+    "workspaceFolder": "/workspaces/materials/wordcount",
+    "workspaceMount": "source=${localWorkspaceFolder},target=/workspaces/materials,type=bind",
+    "postCreateCommand": {
+        "project": "python -m pip install -r requirements.txt -e . && rm -rf src/*.egg-info/",
+        "help": "echo 'echo -e \"💡 Run \\e[1mpytest --task\\e[0m to display instructions for the current task.\n💡 Run \\e[1mpytest\\e[0m to evaluate your solution and track your progress.\"' >> ~/.bashrc"
+    },
+    "customizations": {
+        "codespaces": {
+            "openFiles": [
+                "src/wordcount.py"
+            ]
+        },
+        "vscode": {
+            "extensions": [
+                "ms-python.python"
+            ]
+        }
+    }
+}
diff --git a/wordcount/.vscode/settings.json b/wordcount/.vscode/settings.json
new file mode 100644
index 0000000000..ab57c75707
--- /dev/null
+++ b/wordcount/.vscode/settings.json
@@ -0,0 +1,29 @@
+{
+    "breadcrumbs.enabled": false,
+    "editor.dragAndDrop": false,
+    "editor.fontSize": 20,
+    "editor.minimap.enabled": false,
+    "editor.mouseWheelZoom": true,
+    "editor.renderWhitespace": "all",
+    "files.exclude": {
+        "**/.*": true,
+        "**/__pycache__": true
+    },
+    "git.detectSubmodules": false,
+    "git.openRepositoryInParentFolders": "never",
+    "markdown.preview.fontSize": 20,
+    "python.testing.pytestArgs": [
+        "tests"
+    ],
+    "python.testing.pytestEnabled": true,
+    "terminal.integrated.fontSize": 20,
+    "terminal.integrated.mouseWheelZoom": true,
+    "window.autoDetectColorScheme": true,
+    "window.commandCenter": false,
+    "workbench.editorAssociations": {
+        "*.md": "vscode.markdown.preview.editor"
+    },
+    "workbench.layoutControl.enabled": false,
+    "workbench.preferredDarkColorTheme": "GitHub Dark",
+    "workbench.preferredLightColorTheme": "GitHub Light"
+}
diff --git a/wordcount/README.md b/wordcount/README.md
new file mode 100644
index 0000000000..48d5514ac6
--- /dev/null
+++ b/wordcount/README.md
@@ -0,0 +1,46 @@
+# Python Project: Build a Word Count Command-Line App
+
+This folder contains supporting materials for the [wordcount coding challenge](https://realpython.com/courses/word-count-app-project/) on Real Python.
+
+## How to Get Started?
+
+### Cloud Environment
+
+If you'd like to solve this challenge with minimal setup, then click the button below to launch a pre-configured environment in the cloud:
+
+[![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/realpython/materials?quickstart=1&devcontainer_path=.devcontainer%2Fwordcount%2Fdevcontainer.json)
+
+Alternatively, follow the steps below to set up the environment on your local machine.
+ +### Local Computer + +Use the [downloader tool](https://realpython.github.io/gh-download/?url=https%3A%2F%2Fgithub.com%2Frealpython%2Fmaterials%2Ftree%2Fmaster%2Fwordcount) to get the project files or clone the entire [`realpython/materials`](https://github.com/realpython/materials) repository from GitHub and change your directory to `materials/wordcount/`: + +```sh +$ git clone https://github.com/realpython/materials.git +$ cd materials/wordcount/ +``` + +Create and activate a [virtual environment](https://realpython.com/python-virtual-environments-a-primer/), and then install the project in [editable mode](https://setuptools.pypa.io/en/latest/userguide/development_mode.html): + +```sh +$ python -m venv venv/ +$ source venv/bin/activate +(venv) $ python -m pip install -r requirements.txt -e . +``` + +Make sure to include the period at the end of the command! + +## How to Get Feedback? + +To display instructions for your current task: + +```sh +(venv) $ pytest --task +``` + +To track your progress and reveal the acceptance criteria: + +```sh +(venv) $ pytest +``` diff --git a/wordcount/pyproject.toml b/wordcount/pyproject.toml new file mode 100644 index 0000000000..9ea37b63e0 --- /dev/null +++ b/wordcount/pyproject.toml @@ -0,0 +1,19 @@ +[build-system] +requires = ["setuptools"] +build-backend = "setuptools.build_meta" + +[project] +name = "wordcount" +version = "1.0.0" +readme = "README.md" +dependencies = [ + "pytest", + "pytest-timeout", + "rich", +] + +[project.scripts] +wordcount = "wordcount:main" + +[tool.black] +line-length = 79 diff --git a/wordcount/requirements.txt b/wordcount/requirements.txt new file mode 100644 index 0000000000..7c882fd036 --- /dev/null +++ b/wordcount/requirements.txt @@ -0,0 +1,9 @@ +iniconfig==2.1.0 +markdown-it-py==3.0.0 +mdurl==0.1.2 +packaging==25.0 +pluggy==1.6.0 +Pygments==2.19.1 +pytest==8.3.5 +pytest-timeout==2.4.0 +rich==14.0.0 diff --git a/wordcount/src/wordcount.py b/wordcount/src/wordcount.py new file mode 100644 index 0000000000..79c6aa51df --- /dev/null +++ b/wordcount/src/wordcount.py @@ -0,0 +1,3 @@ +# Uncomment the main() function below to solve your first task: +# def main(): +# pass diff --git a/wordcount/tests/conftest.py b/wordcount/tests/conftest.py new file mode 100644 index 0000000000..b459117ee5 --- /dev/null +++ b/wordcount/tests/conftest.py @@ -0,0 +1,3 @@ +from fixtures import * # noqa + +pytest_plugins = ["realpython"] diff --git a/wordcount/tests/fixtures.py b/wordcount/tests/fixtures.py new file mode 100644 index 0000000000..56c60cbe09 --- /dev/null +++ b/wordcount/tests/fixtures.py @@ -0,0 +1,229 @@ +import random +import string +from dataclasses import dataclass +from functools import cached_property +from pathlib import Path +from string import ascii_lowercase +from subprocess import run +from tempfile import TemporaryDirectory, gettempdir +from typing import Callable + +import pytest + + +@dataclass +class FakeFile: + content: bytes + counts: tuple[int, ...] 
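+    # The `counts` tuple stores (lines, words, characters, bytes) for the
+    # content, mirroring wc's -l, -w, -m, and -c counters. In format_line()
+    # below, `selected` is a bitmask over these four columns (8=lines,
+    # 4=words, 2=characters, 1=bytes), so the default of 8 + 4 + 1 picks
+    # the line, word, and byte counts, just like plain wc.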
+ + @cached_property + def path(self) -> Path: + return Path("-") + + def format_line(self, max_digits=None, selected=None): + if selected is None: + selected = 8 + 4 + 1 + numbers = [ + self.counts[i] for i in range(4) if selected & (2 ** (3 - i)) + ] + if max_digits is None: + max_digits = len(str(max(numbers))) + counts = " ".join( + filter(None, [f"{number:{max_digits}}" for number in numbers]) + ) + if self.path.name == "-": + return f"{counts}\n".encode("utf-8") + else: + return f"{counts} {self.path}\n".encode("utf-8") + + +@dataclass +class TempFile(FakeFile): + @cached_property + def path(self) -> Path: + name = "".join(random.choices(ascii_lowercase, k=10)) + return Path(gettempdir()) / name + + def __post_init__(self): + self.path.write_bytes(self.content) + + def delete(self): + if self.path.is_dir(): + self.path.rmdir() + elif self.path.is_file(): + self.path.unlink(missing_ok=True) + + +@dataclass(frozen=True) +class Files: + files: list[FakeFile] + + def __iter__(self): + return iter(self.files) + + def __len__(self): + return len(self.files) + + @cached_property + def paths(self): + return [str(file.path) for file in self.files] + + @cached_property + def expected(self): + if len(self.files) > 1: + return self.file_lines + self.total_line + else: + return self.file_lines + + @cached_property + def file_lines(self): + return b"".join(file.format_line() for file in self.files) + + @cached_property + def total_line(self): + totals = [sum(file.counts[i] for file in self.files) for i in range(4)] + md = len(str(max(totals))) + return f"{totals[0]:{md}} {totals[1]:{md}} {totals[3]:{md}} total\n".encode( + "utf-8" + ) + + +@pytest.fixture(scope="session") +def small_file(): + temp_file = TempFile(content=b"caffe\n", counts=(1, 1, 6, 6)) + try: + yield temp_file + finally: + temp_file.delete() + + +@pytest.fixture(scope="session") +def big_file(): + temp_file = TempFile( + content=( + b"Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod\n" + b"tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam,\n" + b"quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo\n" + b"consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse\n" + b"cillum dolore eu fugiat nulla pariatur. 
Excepteur sint occaecat cupidatat non\n"
+            b"proident, sunt in culpa qui officia deserunt mollit anim id est laborum.\n"
+        ),
+        counts=(6, 69, 447, 447),
+    )
+    try:
+        yield temp_file
+    finally:
+        temp_file.delete()
+
+
+@pytest.fixture(scope="session")
+def file1():
+    temp_file = TempFile(content=b"caffe latte\n", counts=(1, 2, 12, 12))
+    try:
+        yield temp_file
+    finally:
+        temp_file.delete()
+
+
+@pytest.fixture(scope="session")
+def file2():
+    temp_file = TempFile(
+        content=b"Lorem ipsum dolor sit amet\n", counts=(1, 5, 27, 27)
+    )
+    try:
+        yield temp_file
+    finally:
+        temp_file.delete()
+
+
+@pytest.fixture(scope="session")
+def unicode_file():
+    temp_file = TempFile(
+        content="Zażółć gęślą jaźń\n".encode("utf-8"), counts=(1, 3, 18, 27)
+    )
+    try:
+        yield temp_file
+    finally:
+        temp_file.delete()
+
+
+@pytest.fixture(scope="session")
+def small_files():
+    temp_files = [
+        TempFile(content=b"Mocha", counts=(0, 1, 5, 5)),
+        TempFile(content=b"Espresso\n", counts=(1, 1, 9, 9)),
+        TempFile(content=b"Cappuccino\n", counts=(1, 1, 11, 11)),
+        TempFile(content=b"Frappuccino", counts=(0, 1, 11, 11)),
+        TempFile(content=b"Flat White\n", counts=(1, 2, 11, 11)),
+        TempFile(content=b"Turkish Coffee", counts=(0, 2, 14, 14)),
+        TempFile(content=b"Irish Coffee Drink\n", counts=(1, 3, 19, 19)),
+        TempFile(content=b"Espresso con Panna", counts=(0, 3, 18, 18)),
+    ]
+    try:
+        yield Files(temp_files)
+    finally:
+        for file in temp_files:
+            file.delete()
+
+
+@pytest.fixture(scope="session")
+def medium_files(file1, file2, unicode_file):
+    return Files([file1, file2, unicode_file])
+
+
+@pytest.fixture(scope="session")
+def wc():
+    def function(*args, stdin: bytes | None = None) -> bytes:
+        process = run(["wordcount", *args], capture_output=True, input=stdin)
+        return process.stdout
+
+    return function
+
+
+@pytest.fixture(scope="session")
+def fake_dir():
+    with TemporaryDirectory(delete=False) as directory:
+        path = Path(directory)
+    try:
+        yield path
+    finally:
+        path.rmdir()
+
+
+@pytest.fixture(scope="function")
+def random_name():
+    return make_random_name()
+
+
+def make_random_name(length=10):
+    return "".join(random.choices(string.ascii_lowercase, k=length))
+
+
+@pytest.fixture(scope="session")
+def runner(wc, small_file, unicode_file, big_file, fake_dir):
+    return Runner(
+        wc, small_file, unicode_file, big_file, fake_dir, make_random_name()
+    )
+
+
+@dataclass
+class Runner:
+    wc: Callable
+    file1: FakeFile
+    file2: FakeFile
+    file3: FakeFile
+    fake_dir: Path
+    random_name: str
+
+    def __call__(self, *flags):
+        return self.wc(
+            *flags,
+            str(self.file1.path),
+            "-",
+            str(self.file2.path),
+            self.fake_dir,
+            "-",
+            str(self.file3.path),
+            self.random_name,
+            stdin=b"flat white",
+        )
diff --git a/wordcount/tests/realpython/HOWTO.md b/wordcount/tests/realpython/HOWTO.md
new file mode 100644
index 0000000000..8ca1bd7be5
--- /dev/null
+++ b/wordcount/tests/realpython/HOWTO.md
@@ -0,0 +1,258 @@
+# How to Define the Acceptance Criteria?
+
+## File Naming
+
+In your project's `tests/` directory, create one or more Python source files (`.py`) corresponding to each task in the coding challenge. These files must follow a specific naming convention, given by the regular expression `task_?\d+\.py`.
+
+Here are a few examples:
+
+```
+task5.py
+task05.py
+task_05.py
+```
+
+This naming convention makes these files recognizable by the pytest-realpython plugin, which ignores standard pytest files prefixed or suffixed with the word "test."
At the same time, users can run regular unit tests by disabling the plugin, e.g., with `pytest -p no:realpython`. + +## Registering Acceptance Criteria for Each Task + +Inside each task file, create a class decorated with the `@task()` decorator: + +```python +from realpython import task + +@task( + number=1, + name="Run the wordcount Command", + url="https://realpython.com/lessons/run-the-wordcount-command-task/", +) +class Test: + def test_one(self): + ... + + def test_two(self): + ... + + def test_three(self): + ... +``` + +This class can be named anything, e.g., `Test`, and you can reuse this name across different files if you want to. + +Task numbering starts at one and there cannot be any duplicates or gaps between task numbers. The plugin will enforce that, or else you'll get an error. + +Note that you can have at most one test class per task, so you won't be able to spread your test methods across multiple classes for the same task. This is to ensure a single source of truth. Otherwise, different classes could use inconsistent task names or URLs, which would be prone to copy-paste errors. + +## Associating Resources With Tasks and Acceptance Criteria + +If a particular test fails a few times, the report displays a list of clickable links that point to Real Python resources, including: + +- Written Tutorials +- Video Courses +- Podcast Episodes +- Learning Paths + +You can associate resources common to all test methods by placing the corresponding decorator on the test class, e.g.: + +```python +from realpython import task, tutorial, course, podcast + +@task( + number=1, + name="Run the wordcount Command", + url="https://realpython.com/lessons/run-the-wordcount-command-task/", +) +@tutorial("python-comments-guide") +@course("writing-comments-python", "Writing Comments in Python") +class Test: + ... +``` + +This will cascade down to the individual test methods, meaning that if one of them fails, then we'll include that resource on the list of hints. + +In contrast, decorating the individual test methods will let you associate resources unique to the acceptance criterion at hand: + +```python +from realpython import task, tutorial, course, podcast + +@task( + number=1, + name="Run the wordcount Command", + url="https://realpython.com/lessons/run-the-wordcount-command-task/", +) +class Test: + + def test_one(self): + ... + + @course("writing-comments-python", "Writing Comments in Python") + def test_two(self): + ... + + @tutorial("python-comments-guide") + def test_three(self): + ... +``` + +These decorators expect the **slug** to identify a resource in the CMS. If you don't provide a title, which is an optional parameter, then the slug will be automatically prettified and used as a link label. + +## Making the Acceptance Criteria Look Pretty + +By default, the plugin will try to prettify the acceptance criteria shown in the report based on the name of the corresponding test method, e.g.: + +```python +def test_reports_zeros_on_an_empty_stream(self): + ... +``` + +...becomes "_Reports zeros on an empty stream_." + +If you'd like to use special characters or punctuation, which are not valid Python syntax, then you can define a docstring in the test method, which will override the method name: + +```python +def test_count_default_stdin(self): + """Counts lines, words, and bytes in stdin by default""" +``` + +## Parameterizing Test Methods + +pytest allows you to run the same test method against different parameters (data-driven testing). 
The pytest-realpython plugin supports this, so you can do the following:
+
+```python
+import pytest
+
+@pytest.mark.parametrize("flags", [
+    [],
+    ["-l"],
+    ["-w"],
+    ["-c"],
+    ["-l", "-w", "-c"],
+])
+def test_always_displays_counts_in_the_same_order(self, flags):
+    ...
+```
+
+The resulting report will append the values of the parameters to the names of the acceptance criteria. This will work regardless of whether you provide a docstring or not.
+
+## Overriding the Default Timeout of Individual Test Methods
+
+By default, each test method will time out after a predefined number of seconds. If you'd like to override this default timeout, then use a decorator from the pytest-timeout plugin, like so:
+
+```python
+import pytest
+
+@task(...)
+class Test:
+    @pytest.mark.timeout(3.5)
+    def test_one(self):
+        ...
+```
+
+## Running Tests in DEBUG Mode
+
+The plugin disables the `terminalreporter` plugin, which is responsible for pytest's standard output. This is nice for running tests in "production" but hides tracebacks that might be useful during development. To include this core plugin, you can set the `DEBUG` environment variable:
+
+```sh
+$ DEBUG=1 pytest
+```
+
+Moreover, if something catastrophic happens while running tests, i.e., when the pytest-realpython plugin raises an exception, then we don't show the report. Instead, we print the stack trace.
+
+Also, it can sometimes be useful to preview the pytest cache:
+
+```sh
+$ pytest --cache-show
+```
+
+## Writing Assertions
+
+You can use pytest's standard assertions, for example:
+
+```python
+def test_one(self):
+    assert "foo" == "bar"
+```
+
+Unfortunately, the `terminalreporter` plugin, which is disabled, tightly couples printing to stdout with collecting test results and rewriting the `assert` statements with custom bytecode instructions. This means that we no longer get the nice output of failed assertions that pytest provides.
+
+As a workaround, you can append an optional message that will appear as a hint in the test report summary:
+
+```python
+def test_one(self):
+    assert "expected" == function(), "Be careful about handling this and that"
+```
+
+However, it still won't show the **expected vs. actual** values. If you want those, then you must use a custom assertion function provided by the pytest-realpython plugin:
+
+```python
+from realpython import task, assert_equals
+
+@task(...)
+class Test:
+    def test_one(self):
+        assert_equals(
+            "expected",
+            function(),
+            "Your function should return XYZ"
+        )
+```
+
+Note that the order of these arguments matters! The _expected_ value always comes first! Therefore, it's a good idea to be explicit and use named arguments:
+
+```python
+from realpython import task, assert_equals
+
+@task(...)
+class Test:
+    def test_one(self):
+        assert_equals(
+            expected="expected",
+            actual=function(),
+            message="Your function should return XYZ"
+        )
+```
+
+If you just want to show the expected vs. actual values without any extra message, then you can omit the third argument:
+
+```python
+from realpython import task, assert_equals
+
+@task(...)
+class Test:
+    def test_one(self):
+        assert_equals("expected", function())
+```
+
+Note that the messages support Markdown syntax, so you can include links and the desired font formatting.
+
+## Using the CLI
+
+Run all test methods up to the current task:
+
+```sh
+$ pytest
+```
+
+When all the acceptance criteria so far have passed, you unlock and advance to the next task.
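+
+Display the instructions for the current task (the plugin prints the task's URL and opens it in your browser):
+
+```sh
+$ pytest --task
+```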
+
+Run only the failed tests:
+
+```sh
+$ pytest --last-failed
+```
+
+Run the test methods of a specific task:
+
+```sh
+$ pytest -k 5
+$ pytest -k 10
+$ pytest -k 01
+```
+
+Note that typing `-k 1` would run both task_01 and task_10 because the argument is an expression matched against pytest's node IDs.
+
+Erase the progress to start from scratch (without deleting or reverting Python source files):
+
+```sh
+$ pytest --cache-clear
+```
diff --git a/wordcount/tests/realpython/__init__.py b/wordcount/tests/realpython/__init__.py
new file mode 100644
index 0000000000..a5f9712bd1
--- /dev/null
+++ b/wordcount/tests/realpython/__init__.py
@@ -0,0 +1,7 @@
+# flake8: noqa
+
+from .assertions import *
+from .constants import *
+from .hooks import *
+from .resources import course, external, learning_path, podcast, tutorial
+from .tasks import task
diff --git a/wordcount/tests/realpython/assertions.py b/wordcount/tests/realpython/assertions.py
new file mode 100644
index 0000000000..f998870196
--- /dev/null
+++ b/wordcount/tests/realpython/assertions.py
@@ -0,0 +1,19 @@
+"""PYTEST_DONT_REWRITE"""  # Disable pytest's assertion rewriting for this module!
+
+from .exceptions import RealPythonAssertionError
+
+
+def assert_equals(expected, actual, message=None):
+    if expected != actual:
+        raise RealPythonAssertionError(expected, actual, message)
+
+
+def assert_equals_if(expected, actual, message=None):
+    """Only show the expected vs. actual table on a truthy value."""
+    if bool(actual):
+        assert_equals(expected, actual, message)
+    else:
+        if message:
+            assert expected == actual, message
+        else:
+            assert expected == actual
diff --git a/wordcount/tests/realpython/constants.py b/wordcount/tests/realpython/constants.py
new file mode 100644
index 0000000000..3004371fad
--- /dev/null
+++ b/wordcount/tests/realpython/constants.py
@@ -0,0 +1,7 @@
+from pytest import StashKey, TestReport
+
+STASH_REPORT_KEY = StashKey[TestReport]()
+CACHE_TASKS_KEY = "realpython/tasks"
+COMMAND_TASK = "--task"
+MIN_FAILURES_BEFORE_HINT = 3
+TEST_TIMEOUT_SECONDS = 1.5
diff --git a/wordcount/tests/realpython/exceptions.py b/wordcount/tests/realpython/exceptions.py
new file mode 100644
index 0000000000..de4ae40eb1
--- /dev/null
+++ b/wordcount/tests/realpython/exceptions.py
@@ -0,0 +1,5 @@
+class RealPythonAssertionError(AssertionError):
+    def __init__(self, expected, actual, message=None):
+        self.expected = expected
+        self.actual = actual
+        self.message = message
diff --git a/wordcount/tests/realpython/hooks.py b/wordcount/tests/realpython/hooks.py
new file mode 100644
index 0000000000..df16fd08f0
--- /dev/null
+++ b/wordcount/tests/realpython/hooks.py
@@ -0,0 +1,195 @@
+import os
+import re
+import sys
+import traceback
+import webbrowser
+from operator import attrgetter
+from unittest.mock import Mock
+
+import pytest
+from _pytest.outcomes import OutcomeException
+from pytest import Config, Item, Parser, Session, TestReport
+
+from .
import RealPythonAssertionError +from .constants import ( + COMMAND_TASK, + MIN_FAILURES_BEFORE_HINT, + STASH_REPORT_KEY, + TEST_TIMEOUT_SECONDS, +) +from .models import ExerciseProgress, TestRun, TestStatus +from .resources import Resource +from .view import Display + +error = False + + +def pytest_exception_interact(call, report): + if call.excinfo.type is RealPythonAssertionError: + report.exception = call.excinfo.value + elif call.excinfo.type is AssertionError: + try: + message = call.excinfo.value.args[0] + except IndexError: + pass + else: + report.exception = RealPythonAssertionError( + expected=None, + actual=None, + message=f"\N{ELECTRIC LIGHT BULB} {message}", + ) + elif issubclass(call.excinfo.type, OutcomeException): + report.exception = None + else: + global error + error = True + traceback.print_exception(call.excinfo.value, file=sys.stderr) + + +def pytest_collect_file(parent, file_path): + if re.fullmatch(r"task_?\d+.py", file_path.name): + return pytest.Module.from_parent(parent, path=file_path) + + +def pytest_addoption(parser: Parser) -> None: + parser.addoption( + COMMAND_TASK, + action="store_true", + help="Show instructions for the current task", + ) + + +@pytest.hookimpl(trylast=True) +def pytest_configure(config: Config) -> None: + # Suppress pytest's default output unless in help or debug mode: + if not config.getoption("--help") and not os.getenv("DEBUG"): + _disable_plugin(config, "terminalreporter") + # Other plugins are tightly coupled to the terminal reporter: + config.pluginmanager.register(Mock(), "terminalreporter") + + # Disable stdout/stderr capturing unless explicitly enabled: + if not any( + x.startswith("--capture") for x in config.invocation_params.args + ): + _disable_plugin(config, "capturemanager") + + +def pytest_collection_modifyitems(config, items): + # Discard items not associated with a @task() + for item in items.copy(): + if not hasattr(item.function, "task"): + items.remove(item) + + # Ensure the task numbers start at 1 and are contiguous + if task_numbers := set(item.function.task.number for item in items): + if min(task_numbers) != 1: + raise ValueError("task numbers must start at 1") + if max(task_numbers) != len(task_numbers): + raise ValueError("task numbers must be contiguous") + + timeout = pytest.mark.timeout(TEST_TIMEOUT_SECONDS, method="signal") + skip = pytest.mark.skip(reason="The task must be unlocked first") + + progress = ExerciseProgress.from_cache(config.cache) + for item in items: + # Apply a default timeout to each test function + # (can override individual test functions with @pytest.mark.timeout) + item.add_marker(timeout) + + # Skip functions owned by tasks that haven't been unlocked yet: + if item.function.task.number > progress.last_unlocked: + item.add_marker(skip) + + # Order test functions by the task number they belong to: + items.sort(key=attrgetter("function.task.number")) + + +@pytest.hookimpl(wrapper=True) +def pytest_runtest_makereport(item: Item): + # Store the test result on the corresponding item instance: + match report := (yield): + case TestReport(when="setup", outcome="skipped"): + item.stash[STASH_REPORT_KEY] = report + case TestReport(when="call"): + item.stash[STASH_REPORT_KEY] = report + return report + + +def pytest_sessionfinish(session: Session): + if error: + return + + if not session.items: + return + + if session.config.option.cacheclear: # pytest --cache-clear + return + + progress = ExerciseProgress.from_cache(session.config.cache) + test_run = TestRun.from_session(session) + display = 
Display(session.config) + + if session.config.getoption(COMMAND_TASK): + last_unlocked = test_run.task(progress.last_unlocked) + display.print(last_unlocked.url) + webbrowser.open(last_unlocked.url) + return + + try: + _ = test_run.status + except ValueError: + return # None of the tests were executed, e.g., --collect-only + + for test in test_run.tests: + progress.update(test) + + display.summary(progress, test_run) + + if _new_task_unlocked(progress, test_run): + if progress.last_unlocked < test_run.num_tasks: + progress.last_unlocked += 1 + next_task = test_run.task(progress.last_unlocked) + display.unlocked(next_task) + else: + if session.config.option.keyword or session.config.option.lf: + # Don't show congratulations when running a subset of tests + # pytest -k + # pytest --last-failed + pass + else: + display.congratulations() + else: + if resources := _get_resources(progress, test_run): + display.hint(resources) + + # Update cache + progress.save() + + +def _disable_plugin(config: Config, plugin_name: str): + if plugin := config.pluginmanager.getplugin(plugin_name): + config.pluginmanager.unregister(plugin) + + +def _new_task_unlocked(progress: ExerciseProgress, test_run: TestRun) -> bool: + return all( + test.status is TestStatus.PASSED + for test in test_run.tests + if test.task_number <= progress.last_unlocked + ) + + +def _get_resources( + progress: ExerciseProgress, test_run: TestRun +) -> list[Resource]: + return sorted( + set( + resource + for test in test_run.tests + if test.status in (TestStatus.FAILED, TestStatus.TIMED_OUT) + and progress.num_failures(test) > MIN_FAILURES_BEFORE_HINT - 1 + and hasattr(test.function, "resources") + for resource in test.function.resources + ), + key=lambda resource: resource.title_pretty, + ) diff --git a/wordcount/tests/realpython/models.py b/wordcount/tests/realpython/models.py new file mode 100644 index 0000000000..c30e1f99a6 --- /dev/null +++ b/wordcount/tests/realpython/models.py @@ -0,0 +1,211 @@ +import re +from collections import defaultdict +from dataclasses import dataclass +from enum import Enum +from functools import cached_property +from itertools import groupby +from operator import attrgetter +from typing import Iterator, Self + +from pytest import Cache, Function, Item, Session + +from .constants import CACHE_TASKS_KEY, STASH_REPORT_KEY +from .exceptions import RealPythonAssertionError + + +@dataclass(frozen=True) +class Task: + number: int + name: str + url: str + + def __str__(self) -> str: + return f"[Task {self.number}: {self.name}]({self.url})" + + +class TestStatus(Enum): + PASSED = "passed" + FAILED = "failed" + SKIPPED = "skipped" + TIMED_OUT = "timed_out" + + +@dataclass(frozen=True) +class Test: + item: Item + status: TestStatus + exception: RealPythonAssertionError | None + + @cached_property + def id(self) -> str: + return self.item.nodeid + + @cached_property + def function(self) -> Function | None: + if hasattr(self.item, "function"): + return self.item.function + else: + return None + + @cached_property + def task_number(self) -> int | None: + if self.function and hasattr(self.function, "task"): + return self.function.task.number + else: + return None + + @cached_property + def name(self) -> str: + docstring = self.function.__doc__ if self.function else None + full_name = self.id.split("::")[-1] + if match := re.fullmatch(r"([^\[]+)(\[([^]]+)])?", full_name): + function_name = match.group(1) + params = match.group(3) + pretty_name = ( + function_name.removeprefix("test_") + .replace("_", " ") + .capitalize() + 
)
+            if params:
+                if docstring:
+                    return f"{docstring} ({params})"
+                else:
+                    return f"{pretty_name} ({params})"
+            else:
+                if docstring:
+                    return docstring
+                else:
+                    return pretty_name
+        else:
+            return docstring if docstring else full_name
+
+
+@dataclass
+class ExerciseProgress:
+    cache: Cache
+    root: dict
+
+    def __post_init__(self):
+        """Ensure that the "statuses" key is a defaultdict instance."""
+        self.root["statuses"] = defaultdict(
+            dict, self.root.get("statuses", {})
+        )
+
+    @classmethod
+    def from_cache(cls, cache: Cache) -> Self:
+        return cls(
+            cache,
+            cache.get(
+                CACHE_TASKS_KEY,
+                {"last_unlocked": 1, "statuses": defaultdict(dict)},
+            ),
+        )
+
+    @property
+    def last_unlocked(self) -> int:
+        return max(1, self.root.get("last_unlocked", 1))
+
+    @last_unlocked.setter
+    def last_unlocked(self, task_number: int) -> None:
+        self.root["last_unlocked"] = task_number
+
+    def save(self) -> None:
+        self.cache.set(CACHE_TASKS_KEY, self.root)
+
+    def update(self, test: Test) -> None:
+        node = self.root["statuses"][str(test.task_number)]
+        match test.status:
+            case TestStatus.PASSED | TestStatus.SKIPPED as status:
+                node[test.id] = status.value
+            case TestStatus.FAILED | TestStatus.TIMED_OUT as status:
+                node[test.id] = {status.value: self.num_failures(test) + 1}
+
+    def num_failures(self, test: Test) -> int:
+        match self.root.get("statuses", {}).get(str(test.task_number), {}).get(
+            test.id
+        ):
+            case None | "skipped" | "passed":
+                return 0
+            case {"failed": times} | {"timed_out": times}:
+                return times
+            case unknown:
+                raise ValueError(f"Unknown cached test result: {unknown}")
+
+
+@dataclass(frozen=True)
+class TestRun:
+    tests: tuple[Test, ...]
+
+    @classmethod
+    def from_session(cls, session: Session) -> Self:
+        tests = []
+        for item in session.items:
+            if STASH_REPORT_KEY in item.stash:
+                report = item.stash[STASH_REPORT_KEY]
+                if "Failed: Timeout >" in report.longreprtext:
+                    status = TestStatus.TIMED_OUT
+                else:
+                    status = TestStatus(report.outcome)
+                if hasattr(report, "exception"):
+                    exception = report.exception
+                else:
+                    exception = None
+                tests.append(Test(item, status, exception))
+        return cls(tuple(tests))
+
+    @cached_property
+    def num_passed(self) -> int:
+        return sum(
+            1 for test in self.tests if test.status is TestStatus.PASSED
+        )
+
+    @cached_property
+    def num_tests(self) -> int:
+        return len(self.tests)
+
+    @cached_property
+    def num_tasks(self) -> int:
+        return len({test.task_number for test in self.tests})
+
+    @property
+    def tests_by_task(self) -> Iterator[tuple[int, Iterator[Test]]]:
+        # Assume tests have already been sorted by their task number
+        return groupby(self.tests, attrgetter("task_number"))
+
+    @cached_property
+    def status(self) -> TestStatus:
+        statuses = {test.status for test in self.tests}
+        if TestStatus.TIMED_OUT in statuses:
+            return TestStatus.TIMED_OUT
+        elif TestStatus.FAILED in statuses:
+            return TestStatus.FAILED
+        elif statuses in (
+            {TestStatus.PASSED},
+            {TestStatus.PASSED, TestStatus.SKIPPED},
+        ):
+            return TestStatus.PASSED
+        else:
+            raise ValueError("None of the tests were executed")
+
+    def task(self, task_number: int) -> Task:
+        for test in self.tests:
+            if test.task_number == task_number:
+                if test.function and hasattr(test.function, "task"):
+                    return test.function.task
+        raise ValueError(f"invalid task number {task_number}")
+
+    def task_status(self, task_number: int) -> TestStatus:
+        statuses = {
+            test.status
+            for test in self.tests
+            if test.task_number == task_number
+        }
+        if statuses:
+            if TestStatus.TIMED_OUT in
statuses: + return TestStatus.TIMED_OUT + elif TestStatus.FAILED in statuses: + return TestStatus.FAILED + else: + return TestStatus.PASSED + else: + raise ValueError(f"invalid task number {task_number}") diff --git a/wordcount/tests/realpython/readme.py b/wordcount/tests/realpython/readme.py new file mode 100644 index 0000000000..c3d9808720 --- /dev/null +++ b/wordcount/tests/realpython/readme.py @@ -0,0 +1,21 @@ +import re +from functools import cached_property + +from pytest import Config + + +class Readme: + def __init__(self, config: Config) -> None: + path = config.rootpath / "README.md" + if path.exists(): + self._content = path.read_text(encoding="utf-8") + else: + self._content = "" + self._folder_name = config.rootpath.name + + @cached_property + def exercise_name(self) -> str: + if match := re.search(r"^# (.+)", self._content): + return match.group(1).title() + else: + return self._folder_name.title() diff --git a/wordcount/tests/realpython/resources.py b/wordcount/tests/realpython/resources.py new file mode 100644 index 0000000000..75ffe8fc3d --- /dev/null +++ b/wordcount/tests/realpython/resources.py @@ -0,0 +1,128 @@ +from abc import ABC, abstractmethod +from dataclasses import dataclass +from inspect import getmembers, isclass, isfunction +from typing import Callable + + +@dataclass(unsafe_hash=True) +class ExternalResource: + url: str + title: str + + @property + def title_pretty(self): + return self.title + + def __str__(self) -> str: + return f"[{self.title_pretty}]({self.url})" + + +@dataclass(unsafe_hash=True) +class Resource(ABC): + slug: str + title: str | None = None + + @property + def slug_clean(self) -> str: + return self.slug.strip("/") + + @property + def title_pretty(self) -> str: + if self.title is None: + return self.slug_clean.replace("-", " ").title() + else: + return self.title + + @property + @abstractmethod + def url(self) -> str: + pass + + def __str__(self) -> str: + return f"[{self.title_pretty}]({self.url})" + + +@dataclass(unsafe_hash=True) +class Tutorial(Resource): + section_id: str | None = None + + @property + def title_pretty(self) -> str: + if self.section_id and not self.title: + return self.section.replace("-", " ").title() + else: + return super().title_pretty + + @property + def url(self) -> str: + if self.section_id: + return f"https://realpython.com/{self.slug_clean}/#{self.section}" + else: + return f"https://realpython.com/{self.slug_clean}/" + + @property + def section(self): + return self.section_id.lstrip("#") + + +class Course(Resource): + @property + def url(self) -> str: + return f"https://realpython.com/courses/{self.slug_clean}/" + + +class LearningPath(Resource): + @property + def url(self) -> str: + return f"https://realpython.com/learning-paths/{self.slug_clean}/" + + +class Podcast(Resource): + @property + def url(self) -> str: + return f"https://realpython.com/podcasts/rpp/{self.slug_clean}/" + + def __str__(self) -> str: + episode = f"Episode {self.slug_clean}" + if self.title: + return f"[{episode}: {self.title_pretty}]({self.url})" + else: + return f"[{episode}]({self.url})" + + +def tutorial( + slug: str, title: str | None = None, section: str | None = None +) -> Callable: + return _associate(Tutorial, slug, title, section) + + +def course(slug: str, title: str | None = None) -> Callable: + return _associate(Course, slug, title) + + +def learning_path(slug: str, title: str | None = None) -> Callable: + return _associate(LearningPath, slug, title) + + +def podcast(slug: str, title: str | None = None) -> Callable: + 
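+    # The slug is the episode number, which Podcast.__str__() renders as
+    # "Episode <slug>" in the generated resource links.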
return _associate(Podcast, slug, title) + + +def external(url: str, title: str) -> Callable: + return _associate(ExternalResource, url, title) + + +def _associate(resource: type, *args) -> Callable: + def decorator(obj: type | Callable) -> type | Callable: + match obj: + case cls if isclass(obj): + for name, function in getmembers(cls, isfunction): + if name.startswith("test"): + setattr(cls, name, decorator(function)) + case test_function if isfunction(obj): + if not hasattr(test_function, "resources"): + test_function.resources = set() + test_function.resources.add(resource(*args)) + return obj + + return decorator diff --git a/wordcount/tests/realpython/tasks.py b/wordcount/tests/realpython/tasks.py new file mode 100644 index 0000000000..b07df00804 --- /dev/null +++ b/wordcount/tests/realpython/tasks.py @@ -0,0 +1,31 @@ +from inspect import getmembers, isfunction +from typing import Callable + +from .models import Task + + +def task(*, number: int, name: str, url: str) -> Callable: + def decorator(cls: type) -> type: + # Allow only one test class per task (single source of truth) + if number in _registered_task_numbers: + raise ValueError(f"duplicate task number {number}") + _registered_task_numbers.add(number) + + # Cascade down the task to all test functions in the class: + for test_function in _get_test_functions(cls): + test_function.task = Task(number, name, url) + + return cls + + return decorator + + +def _get_test_functions(cls): + return ( + function + for symbol, function in getmembers(cls, isfunction) + if symbol.startswith("test") + ) + + +_registered_task_numbers: set[int] = set() diff --git a/wordcount/tests/realpython/view.py b/wordcount/tests/realpython/view.py new file mode 100644 index 0000000000..3f35b717ba --- /dev/null +++ b/wordcount/tests/realpython/view.py @@ -0,0 +1,159 @@ +import os + +from pytest import Config +from rich.console import Console, Group +from rich.markdown import Markdown +from rich.panel import Panel +from rich.progress import BarColumn, TextColumn +from rich.progress import Progress as ProgressBar +from rich.table import Table +from rich.tree import Tree + +from . import RealPythonAssertionError +from .models import ExerciseProgress, Task, TestRun, TestStatus +from .readme import Readme +from .resources import Resource + + +class Display: + def __init__(self, config: Config) -> None: + self._readme = Readme(config) + self._console = Console(force_terminal=True) + + def print(self, *args, **kwargs) -> None: + self._console.print(*args, **kwargs) + + def hint(self, resources: list[Resource]) -> None: + lines = [ + "\N{ELECTRIC LIGHT BULB} Need help? Check out these resources:", + *[f"- {resource}" for resource in resources], + ] + self.print(Markdown("\n".join(lines))) + + def unlocked(self, next_task: Task) -> None: + self.print("Yay! You've unlocked another task \N{PARTY POPPER}") + self.print( + Markdown(f"\N{WHITE RIGHT POINTING BACKHAND INDEX} {next_task}") + ) + + def congratulations(self) -> None: + self.print( + "Congratulations! 
You've completed the whole coding challenge " + "\N{FACE WITH PARTY HORN AND PARTY HAT}" + ) + + def summary(self, progress: ExerciseProgress, test_run: TestRun) -> None: + status_color = _color(test_run.status) + self.print( + Panel( + Group( + _legend(), + "", + _tree(progress, test_run), + "", + _progress_bar(test_run, status_color), + ), + title=f"[b]{self._readme.exercise_name}[/b]", + border_style=status_color, + expand=False, + ) + ) + + +def _legend() -> str: + return ( + "[b]Status Indicators[/]\n" + "[green]\N{CHECK MARK} Completed " + "[red]\N{BALLOT X} Failed " + "[gold1]\N{STOPWATCH} Timed out " + "[grey37]\N{LOCK} Locked" + ) + + +def _tree(progress: ExerciseProgress, test_run: TestRun) -> Tree: + tree = Tree("[bold]Tasks[/]") + for task_number, task_tests in test_run.tests_by_task: + task_tests = tuple(task_tests) + test_function = task_tests[0].function + if test_function and hasattr(test_function, "task"): + task = test_function.task + if task_number > progress.last_unlocked: + tree.add( + f"[grey37] \N{LOCK} [b]Task {task_number}: {task.name}[/]" + ) + else: + task_status = test_run.task_status(task_number) + color = _color(task_status) + icon = _icon(task_status) + task_branch = tree.add( + f"[{color}][b]{icon} Task {task_number}: {task.name}[/]" + ) + for test in task_tests: + color = _color(test.status) + icon = _icon(test.status) + test_branch = task_branch.add( + f"[{color}]{icon} {test.name}[/]" + ) + if test.status is TestStatus.FAILED and test.exception: + test_branch.add(_assertion(test.exception)) + return tree + + +def _progress_bar(test_run: TestRun, color: str) -> ProgressBar: + progress_bar = ProgressBar( + TextColumn("[bold][progress.description]{task.description}[/]"), + BarColumn(complete_style=color, finished_style="green"), + TextColumn( + f"[progress.percentage][{color}]{{task.percentage:>3.0f}}%" + ), + expand=True, + ) + task_id = progress_bar.add_task("Progress", total=test_run.num_tests) + progress_bar.update(task_id, completed=test_run.num_passed) + return progress_bar + + +def _color(status: TestStatus) -> str: + return { + TestStatus.PASSED: "green", + TestStatus.FAILED: "red", + TestStatus.TIMED_OUT: "gold1", + TestStatus.SKIPPED: "grey37 strike", + }.get(status, "#ffffff") + + +def _icon(status: TestStatus) -> str: + return { + TestStatus.PASSED: "\N{CHECK MARK}", + TestStatus.FAILED: "\N{BALLOT X}", + TestStatus.TIMED_OUT: "\N{STOPWATCH}", + }.get(status, "") + + +def _assertion(exception: RealPythonAssertionError) -> Panel: + def repr_(value): + if isinstance(value, bytes): + return repr(value.decode("utf-8")) + if isinstance(value, str): + return repr(value) + return repr(value) + + elements = [] + if exception.message: + elements.append(Markdown(exception.message)) + if exception.expected != exception.actual: + table = Table(show_edge=False, style="red") + table.add_column("Expected", header_style="red") + table.add_column("Actual", header_style="red") + table.add_row( + Markdown(f"```python\n{repr_(exception.expected)}\n```"), + Markdown(f"```python\n{repr_(exception.actual)}\n```"), + ) + elements.append(table) + if len(elements) == 2: + elements.insert(1, "") + return Panel( + Group(*elements), + width=round(os.get_terminal_size().columns * 0.5), + border_style="red", + ) diff --git a/wordcount/tests/task_01.py b/wordcount/tests/task_01.py new file mode 100644 index 0000000000..9afc6753c6 --- /dev/null +++ b/wordcount/tests/task_01.py @@ -0,0 +1,28 @@ +import os +from subprocess import run + +from realpython import assert_equals, 
course, task, tutorial + + +@task( + number=1, + name="Run the wordcount Command", + url="https://realpython.com/lessons/wordcount-run-command-task/", +) +@tutorial("defining-your-own-python-function") +@tutorial("terminal-commands", "The Terminal: First Steps and Useful Commands") +@tutorial("python-pass", "The `pass` Statement: How to Do Nothing in Python") +@course("using-terminal-linux", "Using the Terminal on Linux") +@tutorial("python-comments-guide", "Writing Comments in Python (Guide)") +@course("writing-comments-python", "Writing Comments in Python") +class Test: + def test_command_returns_successfully(self): + process = run(["wordcount", os.devnull], capture_output=True) + assert_equals( + expected=0, + actual=process.returncode, + message=( + "The process should return a zero [exit status]" + "(https://en.wikipedia.org/wiki/Exit_status) code" + ), + ) diff --git a/wordcount/tests/task_02.py b/wordcount/tests/task_02.py new file mode 100644 index 0000000000..0544f8c7f6 --- /dev/null +++ b/wordcount/tests/task_02.py @@ -0,0 +1,45 @@ +from realpython import assert_equals, assert_equals_if, task, tutorial + + +@task( + number=2, + name="Read Data From Standard Input", + url="https://realpython.com/lessons/wordcount-read-data-from-standard-input-task/", +) +@tutorial("python-command-line-arguments", section="standard-input") +@tutorial("python-strings", "Strings and Character Data in Python") +@tutorial( + "python-string-split-concatenate-join", + "Splitting, Concatenating, and Joining Strings in Python", +) +@tutorial("len-python-function", "Using the `len()` Function in Python") +@tutorial("python-variables", "Variables in Python: Usage and Best Practices") +@tutorial("python-print", "Your Guide to the Python `print()` Function") +class Test: + def test_reports_zeros_on_an_empty_stream(self, wc): + assert_equals(b"0 0 0\n", wc()) + + def test_handles_a_short_word_without_trailing_newline(self, wc): + assert_equals_if(b"0 1 5\n", wc(stdin=b"caffe")) + + def test_handles_a_short_word_with_trailing_newline(self, wc): + assert_equals_if(b"1 1 6\n", wc(stdin=b"caffe\n")) + + def test_delimits_words_on_whitespace(self, wc): + assert_equals_if( + expected=b"1 1 9\n", + actual=wc(stdin=b"back-end\n"), + message="Pay attention to punctuation and special characters.", + ) + + def test_handles_linux_newline(self, wc): + r"""Handles the Linux newline (\n)""" + assert_equals_if(b"1 2 7\n", wc(stdin=b"hot\ntea")) + + def test_handles_macos_newline(self, wc): + r"""Handles the macOS newline (\r)""" + assert_equals_if(b"0 2 7\n", wc(stdin=b"hot\rtea")) + + def test_handles_windows_newline(self, wc): + r"""Handles the Windows newline (\r\n)""" + assert_equals_if(b"1 2 8\n", wc(stdin=b"hot\r\ntea")) diff --git a/wordcount/tests/task_03.py b/wordcount/tests/task_03.py new file mode 100644 index 0000000000..95d467f64e --- /dev/null +++ b/wordcount/tests/task_03.py @@ -0,0 +1,28 @@ +from realpython import external, task, tutorial + + +@task( + number=3, + name="Handle Non-ASCII Unicode Characters", + url="https://realpython.com/lessons/wordcount-handle-non-ascii-characters-task/", +) +@external( + url="https://docs.python.org/3/howto/unicode.html#python-s-unicode-support", + title="Python's Unicode Support", +) +@tutorial( + "python-encodings-guide", + "Unicode & Character Encodings in Python: A Painless Guide", +) +@tutorial( + "read-write-files-python", "Reading and Writing Files in Python (Guide)" +) +class Test: + def test_decodes_multibyte_character_without_trailing_newline(self, wc): + """Decodes 
a multi-byte character without a trailing newline"""
+        hint = "Note the difference between _e_ and _è_, for example."
+        assert b"0 1 6\n" == wc(stdin=b"caff\xc3\xa8"), hint
+
+    def test_decodes_multibyte_character_with_trailing_newline(self, wc):
+        """Decodes a multi-byte character with a trailing newline"""
+        assert b"1 1 7\n" == wc(stdin=b"caff\xc3\xa8\n")
diff --git a/wordcount/tests/task_04.py b/wordcount/tests/task_04.py
new file mode 100644
index 0000000000..fc849527eb
--- /dev/null
+++ b/wordcount/tests/task_04.py
@@ -0,0 +1,44 @@
+from realpython import task, tutorial
+
+
+@task(
+    number=4,
+    name="Format Numbers in Displayed Counts",
+    url="https://realpython.com/lessons/wordcount-format-numbers-task/",
+)
+@tutorial(
+    "how-to-python-f-string-format-float",
+    "How to Format Floats Within F-Strings in Python",
+)
+@tutorial(
+    "python-f-strings",
+    "Python's F-String for String Interpolation and Formatting",
+)
+@tutorial(
+    "python-min-and-max",
+    "Python's `min()` and `max()`: Find Smallest and Largest Values",
+)
+class Test:
+    def test_long_word_without_trailing_newline(self, wc):
+        assert b" 0 1 29\n" == wc(stdin=b"floccinaucinihilipilification")
+
+    def test_long_word_with_trailing_newline(self, wc):
+        assert b" 1 1 30\n" == wc(stdin=b"floccinaucinihilipilification\n")
+
+    def test_multiple_words_without_trailing_newline(self, wc):
+        assert b" 0 5 26\n" == wc(stdin=b"Lorem ipsum dolor sit amet")
+
+    def test_multiple_words_with_trailing_newline(self, wc):
+        assert b" 1 5 27\n" == wc(stdin=b"Lorem ipsum dolor sit amet\n")
+
+    def test_long_text_multiple_lines(self, wc):
+        assert b" 6 69 447\n" == wc(
+            stdin=(
+                b"Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod\n"
+                b"tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam,\n"
+                b"quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo\n"
+                b"consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse\n"
+                b"cillum dolore eu fugiat nulla pariatur. 
Excepteur sint occaecat cupidatat non\n" + b"proident, sunt in culpa qui officia deserunt mollit anim id est laborum.\n" + ) + ) diff --git a/wordcount/tests/task_05.py b/wordcount/tests/task_05.py new file mode 100644 index 0000000000..d44f00ef5f --- /dev/null +++ b/wordcount/tests/task_05.py @@ -0,0 +1,27 @@ +from realpython import assert_equals, task, tutorial + + +@task( + number=5, + name="Read Data From a File", + url="https://realpython.com/lessons/wordcount-read-data-from-file-task/", +) +@tutorial("python-conditional-statements", "Conditional Statements in Python") +@tutorial( + "python-pathlib", "Python's `pathlib` Module: Taming the File System" +) +@tutorial( + "python-command-line-arguments", + "The `sys.argv` Array", + section="the-sysargv-array", +) +class Test: + def test_displays_counts_and_a_filename_on_the_same_line( + self, wc, small_files + ): + for file in small_files: + assert_equals(file.format_line(), wc(str(file.path))) + + def test_treats_the_dash_character_as_standard_input(self, wc): + """Treats the dash character (-) as standard input""" + assert b"1 1 6\n" == wc("-", stdin=b"latte\n") diff --git a/wordcount/tests/task_06.py b/wordcount/tests/task_06.py new file mode 100644 index 0000000000..cc9ebf56bd --- /dev/null +++ b/wordcount/tests/task_06.py @@ -0,0 +1,23 @@ +from realpython import task, tutorial + + +@task( + number=6, + name="Ignore Directories and Missing Files", + url="https://realpython.com/lessons/wordcount-ignore-invalid-paths-task/", +) +@tutorial("python-exceptions", "Python Exceptions: An Introduction") +@tutorial( + "python-built-in-exceptions", + "Python's Built-in Exceptions: A Walkthrough With Examples", +) +class Test: + def test_reports_zeros_on_a_directory(self, wc, fake_dir): + expected = f"0 0 0 {fake_dir}/ (is a directory)\n".encode() + assert expected == wc(fake_dir) + + def test_reports_zeros_on_a_missing_file(self, wc, random_name): + expected = ( + f"0 0 0 {random_name} (no such file or directory)\n".encode() + ) + assert expected == wc(random_name) diff --git a/wordcount/tests/task_07.py b/wordcount/tests/task_07.py new file mode 100644 index 0000000000..a4b52d03a3 --- /dev/null +++ b/wordcount/tests/task_07.py @@ -0,0 +1,60 @@ +from fixtures import FakeFile, Files +from realpython import assert_equals_if, task, tutorial + + +@task( + number=7, + name="Read Data From Multiple Files", + url="https://realpython.com/lessons/wordcount-read-data-from-multiple-files-task/", +) +@tutorial("python-for-loop", "Python for Loops: The Pythonic Way") +@tutorial("python-list", "Python's list Data Type: A Deep Dive With Examples") +@tutorial( + "list-comprehension-python", "When to Use a List Comprehension in Python" +) +class Test: + def test_displays_counts_and_filenames_on_separate_lines( + self, wc, medium_files + ): + assert wc(*medium_files.paths).startswith(medium_files.file_lines) + + def test_includes_a_summary_with_total_counts(self, wc, medium_files): + assert wc(*medium_files.paths).endswith(medium_files.total_line) + + def test_can_repeat_the_same_file_multiple_times(self, wc, file1): + files = Files([file1, file1, file1]) + assert_equals_if(files.expected, wc(*files.paths)) + + def test_can_mix_files_with_standard_input(self, wc, file2): + dash = FakeFile(b"caffe latte", (0, 2, 11, 11)) + files = Files([file2, dash]) + assert_equals_if(files.expected, wc(*files.paths, stdin=dash.content)) + + def test_reports_a_directory_and_a_missing_file( + self, wc, fake_dir, random_name + ): + assert_equals_if( + b"".join( + [ + f"0 0 0 
{fake_dir}/ (is a directory)\n".encode(), + f"0 0 0 {random_name} (no such file or directory)\n".encode(), + b"0 0 0 total\n", + ] + ), + wc(fake_dir, random_name), + ) + + def test_reports_a_mix_of_all(self, wc, fake_dir, random_name, small_file): + expected = b"".join( + [ + f"0 0 0 {fake_dir}/ (is a directory)\n".encode(), + small_file.format_line(), + f"0 0 0 {random_name} (no such file or directory)\n".encode(), + b"0 1 3\n", + b"1 2 9 total\n", + ] + ) + assert_equals_if( + expected, + wc(fake_dir, str(small_file.path), random_name, "-", stdin=b"hot"), + ) diff --git a/wordcount/tests/task_08.py b/wordcount/tests/task_08.py new file mode 100644 index 0000000000..5de8dc8827 --- /dev/null +++ b/wordcount/tests/task_08.py @@ -0,0 +1,57 @@ +from realpython import assert_equals, task, tutorial + + +@task( + number=8, + name="Ensure Consistent Number Formatting", + url="https://realpython.com/lessons/wordcount-consistent-formatting-task/", +) +@tutorial("python-data-classes", "Data Classes in Python (Guide)") +@tutorial( + "python-multiple-constructors", + "Providing Multiple Constructors in Your Python Classes", +) +@tutorial( + "python-classes", + "Python Classes: The Power of Object-Oriented Programming", +) +@tutorial( + "python-magic-methods", + "Python's Magic Methods: Leverage Their Power in Your Classes", +) +@tutorial( + "python-property", + "Python's property(): Add Managed Attributes to Your Classes", +) +@tutorial( + "python-repr-vs-str", + "When Should You Use .__repr__() vs .__str__() in Python?", +) +@tutorial("python-namedtuple", "Write Pythonic and Clean Code With namedtuple") +class Test: + def test_uses_consistent_formatting_across_lines( + self, wc, small_file, unicode_file, big_file, fake_dir, random_name + ): + expected = b"".join( + [ + small_file.format_line(max_digits=3), + b" 0 2 10\n", + unicode_file.format_line(max_digits=3), + f" 0 0 0 {fake_dir}/ (is a directory)\n".encode(), + b" 0 0 0\n", + big_file.format_line(max_digits=3), + f" 0 0 0 {random_name} (no such file or directory)\n".encode(), + b" 8 75 490 total\n", + ] + ) + actual = wc( + str(small_file.path), + "-", + str(unicode_file.path), + fake_dir, + "-", + str(big_file.path), + random_name, + stdin=b"flat white", + ) + assert_equals(expected, actual) diff --git a/wordcount/tests/task_09.py b/wordcount/tests/task_09.py new file mode 100644 index 0000000000..9dd0970ef2 --- /dev/null +++ b/wordcount/tests/task_09.py @@ -0,0 +1,224 @@ +from itertools import permutations + +from realpython import assert_equals, task, tutorial + + +@task( + number=9, + name="Select Counts With Command-Line Options", + url="https://realpython.com/lessons/wordcount-select-counts-task/", +) +@tutorial("python-bitwise-operators", "Bitwise Operators in Python") +@tutorial( + "command-line-interfaces-python-argparse", + "Build Command-Line Interfaces With Python's `argparse`", +) +@tutorial("python-enum", "Build Enumerations of Constants With Python's Enum") +@tutorial("lru-cache-python", "Caching in Python Using the LRU Cache Strategy") +@tutorial( + "python-built-in-functions", + "Managing Attributes: `getattr()`, `setattr()`, and `delattr()`", + section="managing-attributes-getattr-setattr-and-delattr", +) +@tutorial( + "python-or-operator", + "Short-Circuit Evaluation", + section="short-circuit-evaluation", +) +class Test: + def test_counts_lines_words_bytes_by_default(self, wc, runner): + """Counts lines, words, and bytes by default""" + assert_equals( + expected=b" 3 4 23\n", + actual=wc(stdin=b"caffe\nlatte\nflat 
white\n"), + ) + assert_equals( + expected=b"".join( + [ + runner.file1.format_line(max_digits=3), + b" 0 2 10\n", + runner.file2.format_line(max_digits=3), + f" 0 0 0 {runner.fake_dir}/ (is a directory)\n".encode(), + b" 0 0 0\n", + runner.file3.format_line(max_digits=3), + f" 0 0 0 {runner.random_name} (no such file or directory)\n".encode(), + b" 8 75 490 total\n", + ] + ), + actual=runner(), + ) + + def test_counts_lines_words_bytes_explicitly(self, wc, runner): + """Counts lines, words, and bytes explicitly""" + flags = ["--lines", "--words", "--bytes"] + assert_equals( + expected=b" 3 4 23\n", + actual=wc(*flags, stdin=b"caffe\nlatte\nflat white\n"), + ) + assert_equals( + expected=b"".join( + [ + runner.file1.format_line(max_digits=3, selected=13), + b" 0 2 10\n", + runner.file2.format_line(max_digits=3, selected=13), + f" 0 0 0 {runner.fake_dir}/ (is a directory)\n".encode(), + b" 0 0 0\n", + runner.file3.format_line(max_digits=3, selected=13), + f" 0 0 0 {runner.random_name} (no such file or directory)\n".encode(), + b" 8 75 490 total\n", + ] + ), + actual=runner(*flags), + ) + + def test_only_counts_lines(self, wc, runner): + flags = ["--lines"] + assert_equals( + expected=b"3\n", + actual=wc(*flags, stdin=b"caffe\nlatte\nflat white\n"), + ) + assert_equals( + expected=b"".join( + [ + runner.file1.format_line(selected=8), + b"0\n", + runner.file2.format_line(selected=8), + f"0 {runner.fake_dir}/ (is a directory)\n".encode(), + b"0\n", + runner.file3.format_line(selected=8), + f"0 {runner.random_name} (no such file or directory)\n".encode(), + b"8 total\n", + ] + ), + actual=runner(*flags), + ) + + def test_only_counts_words(self, wc, runner): + flags = ["--words"] + assert_equals( + expected=b"4\n", + actual=wc(*flags, stdin=b"caffe\nlatte\nflat white\n"), + ) + assert_equals( + expected=b"".join( + [ + runner.file1.format_line(max_digits=2, selected=4), + b" 2\n", + runner.file2.format_line(max_digits=2, selected=4), + f" 0 {runner.fake_dir}/ (is a directory)\n".encode(), + b" 0\n", + runner.file3.format_line(max_digits=2, selected=4), + f" 0 {runner.random_name} (no such file or directory)\n".encode(), + b"75 total\n", + ] + ), + actual=runner(*flags), + ) + + def test_only_counts_bytes(self, wc, runner): + flags = ["--bytes"] + assert_equals( + expected=b"23\n", + actual=wc(*flags, stdin=b"caffe\nlatte\nflat white\n"), + ) + assert_equals( + expected=b"".join( + [ + runner.file1.format_line(max_digits=3, selected=1), + b" 10\n", + runner.file2.format_line(max_digits=3, selected=1), + f" 0 {runner.fake_dir}/ (is a directory)\n".encode(), + b" 0\n", + runner.file3.format_line(max_digits=3, selected=1), + f" 0 {runner.random_name} (no such file or directory)\n".encode(), + b"490 total\n", + ] + ), + actual=runner(*flags), + ) + + def test_counts_lines_and_words(self, wc, runner): + flags = ["--lines", "--words"] + assert_equals( + expected=b"3 4\n", + actual=wc(*flags, stdin=b"caffe\nlatte\nflat white\n"), + ) + assert_equals( + expected=b"".join( + [ + runner.file1.format_line(max_digits=2, selected=12), + b" 0 2\n", + runner.file2.format_line(max_digits=2, selected=12), + f" 0 0 {runner.fake_dir}/ (is a directory)\n".encode(), + b" 0 0\n", + runner.file3.format_line(max_digits=2, selected=12), + f" 0 0 {runner.random_name} (no such file or directory)\n".encode(), + b" 8 75 total\n", + ] + ), + actual=runner(*flags), + ) + + def test_counts_lines_and_bytes(self, wc, runner): + flags = ["--lines", "--bytes"] + assert_equals( + expected=b" 3 23\n", + actual=wc(*flags, 
stdin=b"caffe\nlatte\nflat white\n"), + ) + assert_equals( + expected=b"".join( + [ + runner.file1.format_line(max_digits=3, selected=9), + b" 0 10\n", + runner.file2.format_line(max_digits=3, selected=9), + f" 0 0 {runner.fake_dir}/ (is a directory)\n".encode(), + b" 0 0\n", + runner.file3.format_line(max_digits=3, selected=9), + f" 0 0 {runner.random_name} (no such file or directory)\n".encode(), + b" 8 490 total\n", + ] + ), + actual=runner(*flags), + ) + + def test_counts_words_and_bytes(self, wc, runner): + flags = ["--words", "--bytes"] + assert_equals( + expected=b" 4 23\n", + actual=wc(*flags, stdin=b"caffe\nlatte\nflat white\n"), + ) + assert_equals( + expected=b"".join( + [ + runner.file1.format_line(max_digits=3, selected=5), + b" 2 10\n", + runner.file2.format_line(max_digits=3, selected=5), + f" 0 0 {runner.fake_dir}/ (is a directory)\n".encode(), + b" 0 0\n", + runner.file3.format_line(max_digits=3, selected=5), + f" 0 0 {runner.random_name} (no such file or directory)\n".encode(), + b" 75 490 total\n", + ] + ), + actual=runner(*flags), + ) + + def test_always_displays_counts_in_the_same_order(self, wc, runner): + expected = b"".join( + [ + runner.file1.format_line(max_digits=3), + b" 0 2 10\n", + runner.file2.format_line(max_digits=3), + f" 0 0 0 {runner.fake_dir}/ (is a directory)\n".encode(), + b" 0 0 0\n", + runner.file3.format_line(max_digits=3), + f" 0 0 0 {runner.random_name} (no such file or directory)\n".encode(), + b" 8 75 490 total\n", + ] + ) + for flags in permutations(["--lines", "--words", "--bytes"]): + assert_equals( + expected=b" 3 4 23\n", + actual=wc(*flags, stdin=b"caffe\nlatte\nflat white\n"), + ) + assert_equals(expected=expected, actual=runner(*flags)) diff --git a/wordcount/tests/task_10.py b/wordcount/tests/task_10.py new file mode 100644 index 0000000000..f317b3d331 --- /dev/null +++ b/wordcount/tests/task_10.py @@ -0,0 +1,216 @@ +from itertools import permutations + +import pytest +from realpython import TEST_TIMEOUT_SECONDS, assert_equals, task + + +@task( + number=10, + name="Add Support for Counting the Characters", + url="https://realpython.com/lessons/wordcount-counting-characters-task/", +) +class Test: + def test_only_counts_characters(self, wc, runner): + flags = ["--chars"] + assert_equals( + expected=b"18\n", + actual=wc(*flags, stdin="za偶贸艂膰\ng臋艣l膮\nja藕艅\n".encode("utf-8")), + ) + assert_equals( + expected=b"".join( + [ + runner.file1.format_line(max_digits=3, selected=2), + b" 10\n", + runner.file2.format_line(max_digits=3, selected=2), + f" 0 {runner.fake_dir}/ (is a directory)\n".encode(), + b" 0\n", + runner.file3.format_line(max_digits=3, selected=2), + f" 0 {runner.random_name} (no such file or directory)\n".encode(), + b"481 total\n", + ] + ), + actual=runner(*flags), + ) + + def test_counts_characters_and_bytes(self, wc, runner): + flags = ["--chars", "--bytes"] + assert_equals( + expected=b"18 27\n", + actual=wc(*flags, stdin="za偶贸艂膰\ng臋艣l膮 ja藕艅\n".encode("utf-8")), + ) + assert_equals( + expected=b"".join( + [ + runner.file1.format_line(max_digits=3, selected=3), + b" 10 10\n", + runner.file2.format_line(max_digits=3, selected=3), + f" 0 0 {runner.fake_dir}/ (is a directory)\n".encode(), + b" 0 0\n", + runner.file3.format_line(max_digits=3, selected=3), + f" 0 0 {runner.random_name} (no such file or directory)\n".encode(), + b"481 490 total\n", + ] + ), + actual=runner(*flags), + ) + + def test_counts_words_and_characters(self, wc, runner): + flags = ["--words", "--chars"] + assert_equals( + expected=b" 3 18\n", + 
diff --git a/wordcount/tests/task_10.py b/wordcount/tests/task_10.py
new file mode 100644
index 0000000000..f317b3d331
--- /dev/null
+++ b/wordcount/tests/task_10.py
@@ -0,0 +1,216 @@
+from itertools import permutations
+
+import pytest
+from realpython import TEST_TIMEOUT_SECONDS, assert_equals, task
+
+
+@task(
+    number=10,
+    name="Add Support for Counting the Characters",
+    url="https://realpython.com/lessons/wordcount-counting-characters-task/",
+)
+class Test:
+    def test_only_counts_characters(self, wc, runner):
+        flags = ["--chars"]
+        assert_equals(
+            expected=b"18\n",
+            actual=wc(*flags, stdin="zażółć\ngęślą\njaźń\n".encode("utf-8")),
+        )
+        assert_equals(
+            expected=b"".join(
+                [
+                    runner.file1.format_line(max_digits=3, selected=2),
+                    b" 10\n",
+                    runner.file2.format_line(max_digits=3, selected=2),
+                    f"  0 {runner.fake_dir}/ (is a directory)\n".encode(),
+                    b"  0\n",
+                    runner.file3.format_line(max_digits=3, selected=2),
+                    f"  0 {runner.random_name} (no such file or directory)\n".encode(),
+                    b"481 total\n",
+                ]
+            ),
+            actual=runner(*flags),
+        )
+
+    def test_counts_characters_and_bytes(self, wc, runner):
+        flags = ["--chars", "--bytes"]
+        assert_equals(
+            expected=b"18 27\n",
+            actual=wc(*flags, stdin="zażółć\ngęślą jaźń\n".encode("utf-8")),
+        )
+        assert_equals(
+            expected=b"".join(
+                [
+                    runner.file1.format_line(max_digits=3, selected=3),
+                    b" 10  10\n",
+                    runner.file2.format_line(max_digits=3, selected=3),
+                    f"  0   0 {runner.fake_dir}/ (is a directory)\n".encode(),
+                    b"  0   0\n",
+                    runner.file3.format_line(max_digits=3, selected=3),
+                    f"  0   0 {runner.random_name} (no such file or directory)\n".encode(),
+                    b"481 490 total\n",
+                ]
+            ),
+            actual=runner(*flags),
+        )
+
+    def test_counts_words_and_characters(self, wc, runner):
+        flags = ["--words", "--chars"]
+        assert_equals(
+            expected=b" 3 18\n",
+            actual=wc(*flags, stdin="zażółć\ngęślą jaźń\n".encode("utf-8")),
+        )
+        assert_equals(
+            expected=b"".join(
+                [
+                    runner.file1.format_line(max_digits=3, selected=6),
+                    b"  2  10\n",
+                    runner.file2.format_line(max_digits=3, selected=6),
+                    f"  0   0 {runner.fake_dir}/ (is a directory)\n".encode(),
+                    b"  0   0\n",
+                    runner.file3.format_line(max_digits=3, selected=6),
+                    f"  0   0 {runner.random_name} (no such file or directory)\n".encode(),
+                    b" 75 481 total\n",
+                ]
+            ),
+            actual=runner(*flags),
+        )
+
+    def test_counts_words_characters_bytes(self, wc, runner):
+        """Counts words, characters, and bytes"""
+        flags = ["--words", "--chars", "--bytes"]
+        assert_equals(
+            expected=b" 3 18 27\n",
+            actual=wc(*flags, stdin="zażółć\ngęślą jaźń\n".encode("utf-8")),
+        )
+        assert_equals(
+            expected=b"".join(
+                [
+                    runner.file1.format_line(max_digits=3, selected=7),
+                    b"  2  10  10\n",
+                    runner.file2.format_line(max_digits=3, selected=7),
+                    f"  0   0   0 {runner.fake_dir}/ (is a directory)\n".encode(),
+                    b"  0   0   0\n",
+                    runner.file3.format_line(max_digits=3, selected=7),
+                    f"  0   0   0 {runner.random_name} (no such file or directory)\n".encode(),
+                    b" 75 481 490 total\n",
+                ]
+            ),
+            actual=runner(*flags),
+        )
+
+    def test_counts_lines_and_characters(self, wc, runner):
+        flags = ["--lines", "--chars"]
+        assert_equals(
+            expected=b" 2 18\n",
+            actual=wc(*flags, stdin="zażółć\ngęślą jaźń\n".encode("utf-8")),
+        )
+        assert_equals(
+            expected=b"".join(
+                [
+                    runner.file1.format_line(max_digits=3, selected=10),
+                    b"  0  10\n",
+                    runner.file2.format_line(max_digits=3, selected=10),
+                    f"  0   0 {runner.fake_dir}/ (is a directory)\n".encode(),
+                    b"  0   0\n",
+                    runner.file3.format_line(max_digits=3, selected=10),
+                    f"  0   0 {runner.random_name} (no such file or directory)\n".encode(),
+                    b"  8 481 total\n",
+                ]
+            ),
+            actual=runner(*flags),
+        )
+
+    def test_counts_lines_characters_bytes(self, wc, runner):
+        """Counts lines, characters, and bytes"""
+        flags = ["--lines", "--chars", "--bytes"]
+        assert_equals(
+            expected=b" 2 18 27\n",
+            actual=wc(*flags, stdin="zażółć\ngęślą jaźń\n".encode("utf-8")),
+        )
+        assert_equals(
+            expected=b"".join(
+                [
+                    runner.file1.format_line(max_digits=3, selected=11),
+                    b"  0  10  10\n",
+                    runner.file2.format_line(max_digits=3, selected=11),
+                    f"  0   0   0 {runner.fake_dir}/ (is a directory)\n".encode(),
+                    b"  0   0   0\n",
+                    runner.file3.format_line(max_digits=3, selected=11),
+                    f"  0   0   0 {runner.random_name} (no such file or directory)\n".encode(),
+                    b"  8 481 490 total\n",
+                ]
+            ),
+            actual=runner(*flags),
+        )
+
+    def test_counts_lines_words_characters(self, wc, runner):
+        """Counts lines, words, and characters"""
+        flags = ["--lines", "--words", "--chars"]
+        assert_equals(
+            expected=b" 2  3 18\n",
+            actual=wc(*flags, stdin="zażółć\ngęślą jaźń\n".encode("utf-8")),
+        )
+        assert_equals(
+            expected=b"".join(
+                [
+                    runner.file1.format_line(max_digits=3, selected=14),
+                    b"  0   2  10\n",
+                    runner.file2.format_line(max_digits=3, selected=14),
+                    f"  0   0   0 {runner.fake_dir}/ (is a directory)\n".encode(),
+                    b"  0   0   0\n",
+                    runner.file3.format_line(max_digits=3, selected=14),
+                    f"  0   0   0 {runner.random_name} (no such file or directory)\n".encode(),
+                    b"  8  75 481 total\n",
+                ]
+            ),
+            actual=runner(*flags),
+        )
+
+    def test_counts_lines_words_characters_bytes(self, wc, runner):
+        """Counts lines, words, characters, and bytes"""
+        flags = ["--lines", "--words", "--chars", "--bytes"]
+        assert_equals(
+            expected=b" 2  3 18 27\n",
+            actual=wc(*flags, stdin="zażółć\ngęślą jaźń\n".encode("utf-8")),
+        )
+        assert_equals(
+            expected=b"".join(
+                [
+                    runner.file1.format_line(max_digits=3, selected=15),
+                    b"  0   2  10  10\n",
+                    runner.file2.format_line(max_digits=3, selected=15),
+                    f"  0   0   0   0 {runner.fake_dir}/ (is a directory)\n".encode(),
+                    b"  0   0   0   0\n",
+                    runner.file3.format_line(max_digits=3, selected=15),
+                    f"  0   0   0   0 {runner.random_name} (no such file or directory)\n".encode(),
+                    b"  8  75 481 490 total\n",
+                ]
+            ),
+            actual=runner(*flags),
+        )
+
+    @pytest.mark.timeout(TEST_TIMEOUT_SECONDS * 4)
+    def test_always_displays_counts_in_the_same_order(self, wc, runner):
+        expected = b"".join(
+            [
+                runner.file1.format_line(max_digits=3, selected=15),
+                b"  0   2  10  10\n",
+                runner.file2.format_line(max_digits=3, selected=15),
+                f"  0   0   0   0 {runner.fake_dir}/ (is a directory)\n".encode(),
+                b"  0   0   0   0\n",
+                runner.file3.format_line(max_digits=3, selected=15),
+                f"  0   0   0   0 {runner.random_name} (no such file or directory)\n".encode(),
+                b"  8  75 481 490 total\n",
+            ]
+        )
+        for flags in permutations(
+            ["--lines", "--words", "--chars", "--bytes"]
+        ):
+            assert_equals(
+                expected=b" 2  3 18 27\n",
+                actual=wc(
+                    *flags, stdin="zażółć\ngęślą jaźń\n".encode("utf-8")
+                ),
+            )
+            assert_equals(expected=expected, actual=runner(*flags))
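Reviewer's note: task 10 separates characters from bytes. The Polish pangram "zażółć gęślą jaźń" used above is 18 characters but 27 bytes in UTF-8, since each accented letter encodes as two bytes. A minimal sketch of that distinction, where `count_all()` is a hypothetical helper that is not part of the materials:

```python
# Hypothetical sketch, not part of the materials: character counts come
# from the decoded text, while the byte count uses the raw input.
def count_all(data: bytes) -> dict[str, int]:
    text = data.decode("utf-8")
    return {
        "lines": text.count("\n"),
        "words": len(text.split()),
        "chars": len(text),
        "bytes": len(data),
    }


counts = count_all("zażółć\ngęślą jaźń\n".encode("utf-8"))
assert counts == {"lines": 2, "words": 3, "chars": 18, "bytes": 27}
```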