From 39b4f32cf9bd9abf5956672445b7488931166848 Mon Sep 17 00:00:00 2001 From: Merlijn Date: Tue, 11 Mar 2025 08:52:30 +0100 Subject: [PATCH 01/17] docs: Add documentation about benchmarks and campaigns --- README.md | 1 + docs/README.md | 7 ++ docs/benchmark.md | 180 ++++++++++++++++++++++++++++++++++++++++ docs/campaign.md | 142 +++++++++++++++++++++++++++++++ docs/getting_started.md | 54 ++++++++++++ 5 files changed, 384 insertions(+) create mode 100644 docs/README.md create mode 100644 docs/benchmark.md create mode 100644 docs/campaign.md create mode 100644 docs/getting_started.md diff --git a/README.md b/README.md index 9537a835..350fa63f 100644 --- a/README.md +++ b/README.md @@ -113,6 +113,7 @@ iterate over the variables and their values using the cartesian product (or other supported or custom methods), run the benchmark and output the results in a format that is friendly with plotting tools. +For further documentation, see the [documentation](docs/README.md) ## Getting Started diff --git a/docs/README.md b/docs/README.md new file mode 100644 index 00000000..af496484 --- /dev/null +++ b/docs/README.md @@ -0,0 +1,7 @@ +# Documentation + +**This documentation is incomplete, please feel free to change or expand it.** + +* [getting started](getting_started.md) +* [campaign](campaign.md) +* [benchmark](benchmark.md) diff --git a/docs/benchmark.md b/docs/benchmark.md new file mode 100644 index 00000000..6b3d9719 --- /dev/null +++ b/docs/benchmark.md @@ -0,0 +1,180 @@ +# Benchmarks + +A benchmark is what will compile and run your code, using the variables given by the campaign. +Because each project needs to be compiled and ran differently, there is currently no benchmark implementation that can be used, for this reason you should always make a custom benchmark. + +To make a custom benchmark you need to override the `Benchmark` class, and implement the required methods. +Below is an example implementation of a custom benchmark, for more info see [building](#Building the benchmark) and [running](#Running the benchmark). +```python +from benchkit.campaign import Benchmark +from benchkit.utils.dir import get_curdir + + +# The code that should be benchmarked is oftentimes relatively to the location of the current file, which can be gotten using the following method +_bench_src_path = get_curdir(__file__) +_build_dir = _bench_src_path / "build" + +class MyBenchark(Benchmark): + # Init method, setup all of the required variables + def __init__( + self, + ) -> None: + # Init method, calls the `__init__` function of the `Benchmark` class + super().__init__( + command_wrappers=(), + command_attachments=(), + shared_libs=(), + pre_run_hooks=(), + post_run_hooks=(), + ) + + @property + def bench_src_path(self) -> pathlib.Path: + # Returns the path to the source code that needs to be benchmarked + return _bench_src_path + + # Return all of the variables, given to the campaign, that are required to build the source code. + # These are the only variables that will be given to the `build_bench` method. + @staticmethod + def get_build_var_names() -> List[str]: + # TODO: Add your build variables here + return ["importantBuildVariable", "importantVariable"] + + # Return all of the variables, given to the campaign, that are required to run the benchmark. + # These are the only variables that will be given to the `single_run` method. 
+ @staticmethod + def get_run_var_names() -> List[str]: + # TODO: Add your run variables here + return ["importantRunVariable", "importantVariable"] + + # Deprecated, but since it is still called inside of the framework, it should still be overwritten, but only an empty array should be returned. + @staticmethod + def get_tilt_var_names() -> List[str]: + return [] + + # Build the source code using the values required for the current experiment + def build_bench( + self, + # The variables defined in `get_build_var_names`, which values are given by the campaign + # TODO: Add your build variables (defined in `get_build_var_names`) here. + importantBuildVariable, + importantVariable + # The constants given to the campaign + constants, + # Holds all of the variables that are given to this method, but not used + **_kwargs, + ) -> None: + # Remove the build directory before rebuilding + if _build_dir.is_dir() and len(str(_build_dir)) > 4: + shutil.rmtree(str(_build_dir)) + + # Create build directory + self.platform.comm.makedirs(path=_build_dir, exist_ok=True) + + # The command used to compile the code, each argument should be its own string in the array. + # e.g. To compile a single file using `gcc` this should be `["gcc", path_to_file]` + # TODO: add your build command here + compile_command = [ ] + # Run the command inside of the build directory + self.platform.comm.shell( + command=compile_command, + # The command is executed inside of the build directory + current_dir=_build_dir, + ) + + # Run the benchmark once + def single_run( + self, + # The variables defined in `get_run_var_names`, which values are given by the campaign + # TODO: Add your run variables (defined in `get_run_var_names`) here. + importantRunVariable, + importantVariable + # The constants given to the campaign + constants, + # Holds all of the variables that are given to this method, but not used + **kwargs, + ) -> str | AsyncProcess: + # The command used to run the benchmark, each argument should be its own string in the array. + # TODO: add your run command here + run_command = [ ] + + # Run the benchmark in the build directory + output = self.run_bench_command( + run_command=run_command, + wrapped_run_command=run_command, + current_dir=_build_dir, + environment=None, + wrapped_environment=None, + print_output=True, + ) + return output + + # Parse the output from an experiment, and turn it into a dictionary, any information that is returned from this method will be added to the output `csv` file + def parse_output_to_results( + self, + command_output: str, + build_variables: Dict[str, Any], + run_variables: Dict[str, Any], + **kwargs, + ) -> Dict[str, Any]: + # This assumes that each experiments prints a single line of `csv` code, with all of the information used in that run + key_seq_values = command_output.strip().split(";") + result_dict = dict(map(lambda s: s.split("="), key_seq_values)) + return result_dict +``` +This benchmark can then be used for a [campaign](campaign.md). + +> [!NOTE] +> The above is an, incomplete, example implementation that should be adapted to your own use case. +> To facilitate this, `TODO`s have been added where you should change the implementation of the class to fit your needs. + +> [!NOTE] +> Because of the definition of `parse_output_results` this benchmarking class expects a single output line of `scv` code as extra information. +> If this is not what is outputted by your results, either change the output, or change the definition of `parse_output_to_results`. 
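+
+As an illustration, the `parse_output_to_results` method above turns a (hypothetical) output line such as `duration=1.23;nb_threads=4` into a dictionary with one entry per `key=value` pair. The snippet below only re-runs the parsing logic from the example class on such a line:
+```python
+# Hypothetical benchmark output: one line of `key=value` pairs separated by `;`
+command_output = "duration=1.23;nb_threads=4\n"
+
+# Same parsing steps as in `parse_output_to_results` above
+key_seq_values = command_output.strip().split(";")
+result_dict = dict(map(lambda s: s.split("="), key_seq_values))
+
+print(result_dict)  # {'duration': '1.23', 'nb_threads': '4'}
+```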
+
+## Building the benchmark
+
+To build your benchmark code you have to implement the `get_build_var_names` and `build_bench` functions in your `Benchmark` class.
+
+The `get_build_var_names` function should return a list of all of the variables that are used inside of the `build_bench` function.
+The values for these variables will be supplied by the [campaign](campaign.md).
+
+The `build_bench` function should compile your benchmarking code.
+Note that this function is called every time an experiment with different build variables (as defined by `get_build_var_names`) is started, which means that your build folder might already contain build artifacts and should be cleaned first.
+
+The `build_bench` function should compile your benchmarking code, and is called with the following arguments:
+* `self`
+  * The benchmark instance itself
+* `benchmark_duration_seconds`
+  * How long a single benchmark should take, or `None`
+* `constants`
+  * The constants for your benchmark, this is given by the [campaign](campaign.md).
+* The variables returned by `get_build_var_names`
+  * These are the variables that vary between experiments, and are given by the [campaign](campaign.md).
+If you don't need some of these variables, you can use the `**kwargs` which will contain all of the arguments that you do not specify inside of the function.
+
+## Running the benchmark
+
+To run your benchmark code you have to implement the `get_run_var_names` and `single_run` functions in your `Benchmark` class.
+
+The `get_run_var_names` function should return a list of all of the variables that are used inside of the `single_run` function.
+The values for these variables will be supplied by the [campaign](campaign.md).
+
+The `single_run` function should run a single experiment, and return either the console output, or the asynchronous process if the program is run asynchronously.
+
+The `single_run` function should run your compiled benchmark code, and is called with the following arguments:
+* `self`
+  * The benchmark instance itself
+* `benchmark_duration_seconds`
+  * How long a single benchmark should take, or `None`
+* `constants`
+  * The constants for your benchmark, this is given by the [campaign](campaign.md).
+* `build_variables`
+  * The variables returned by `get_build_var_names`
+* `record_data_dir`
+  * The directory where the results of this experiment will be stored
+* `other_variables`
+  * The variables returned by neither `get_run_var_names` nor `get_build_var_names`
+* The variables returned by `get_run_var_names`
+  * These are the variables that vary between experiments, and are given by the [campaign](campaign.md).
+If you don't need some of these variables, you can use the `**kwargs` which will contain all of the arguments that you do not specify inside of the function.
diff --git a/docs/campaign.md b/docs/campaign.md
new file mode 100644
index 00000000..9c1079de
--- /dev/null
+++ b/docs/campaign.md
@@ -0,0 +1,142 @@
+# Campaigns
+
+A campaign is what runs your [benchmark](benchmark.md): a single campaign runs a single benchmark, but runs it multiple times, using different build- and runtime variables.
+
+Inside of `benchkit` there are already three campaign implementations; all three of them are explained below.
+
+The first of the campaigns, `CampaignCartesianProduct`, will run an experiment for every set of variable values obtained by taking the Cartesian product of the possible values.
+This means that each combination of values will be tested when using this campaign.
+For example, if you have the variables `var1` which can equal `1` or `2` and the variable `var2` which can equal `3` or `4`, you will get experiments with the following values. +``` +var1 = 1; var2 = 3 +var1 = 1; var2 = 4 +var1 = 2; var2 = 3 +var1 = 2; var2 = 4 +``` +This kind of campaign can be useful when you want to test every possible combination of values, but it can make the amount of experiments grow very large. + +The second kind of campaign is the `CampaignIterateVariables`, will run an experiment for every pre-defined set of values for the given variables. +This gives you a larger amount of control over the variables that will be combined, ensuring that the amount of experiments ran will not grow too large, even when using a large amount of variables. + +The last campaign is the `CampaignSuite`, this is a campaign that wraps other campaigns, instead of directly running experiments. +This campaign takes multiple other campaigns, of any sort, and will run them, one after another. +Note that, since `CampaignSuite` itself is not a subclass of `Campaign`, you cannot create a `CampaignSuite` that will run other `CampaignSuite`s. + +## Creating a campaign + +Creating a `CampaignCartesianProduct` and a `CampaignIterateVariables` can be done in very similar ways, because the two campaigns only differ in the way that they treat their variables, for this reason we will explain creating the two of them at the same time. + +```python +from benchkit.campaign import CampaignCartesianProduct + +campaign = CampaignCartesianProduct( + # The name of your benchmark + name="benchmark_name", + # The benchmark to use + benchmark=Benchmark(), + # The amount of times each experiment should be ran + nb_runs=3, + # The variables that should be used for the experiments, this is the only thing that differs between `CampaignCartesianProduct` and `CampaignIterateVariables` + variables={ }, + # This is a variable that remains constant throughout all of the experiments that are ran in this campaign + constants={"importantVariable": 5}, + # Wether or not debugging should be turned on, the actual implementation of the debugging is handled by the benchmark + debug=False, + # Wether or not gdb should be used, the way how gdb is used is handled by the benchmark + gdb=False, + # Wether to enable data directories for this campaign, see [results](#results) for more info + enable_data_dir=False, + # How to pretty print variables, this will replace certain variable values with more meaningful, values. This is only used to print certain variables in different ways. + pretty={"importantVaryingVariable": {5: "five", 6: "six"}}, + ## Optional variables + # Can be used to limit the length that an experiment is allowed to run, actually limiting the experiment length should be implemented by the benchmark. + benchmark_duration_seconds = None, # Set to e.g. 5. + # + continuing = False +) +``` + +The above code snippet shows you how to initialize a campaign, the only thing that differs between `CampaignCartesianProduct` and `CampaignIterateVariables` is the `variables` argument. + +For `CampaignCartesianProduct` this argument requires a dictionary where each variable that should vary is assigned an array with all of its possible values. +```python +variables = { + "var1" = [1, 2, 3], + "var2" = [4, 5, 6], + "var3" = ["very", "important", "values"] +} +``` +Using this dictionary for the variables in `CampaignCartesianProduct` will run `27` experiments, combining the three variables in every possible way. 
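+
+The `27` comes from the Cartesian product of the three value lists (3 × 3 × 3). If you want to check how many experiments a given set of variables will generate before starting a campaign, a quick, purely illustrative way is to compute that product yourself:
+```python
+import itertools
+
+variables = {
+    "var1": [1, 2, 3],
+    "var2": [4, 5, 6],
+    "var3": ["very", "important", "values"],
+}
+
+# One tuple per experiment that `CampaignCartesianProduct` would run
+combinations = list(itertools.product(*variables.values()))
+print(len(combinations))  # 27
+```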
+
+For `CampaignIterateVariables` this argument requires an array of dictionaries, each assigning a value to all of the variables that can vary.
+```python
+variables = [
+    {
+        "var1" = 1,
+        "var2" = 5,
+        "var3" = "very",
+    },
+    {
+        "var1" = 2,
+        "var2" = 5,
+        "var3" = "important",
+    },
+    {
+        "var1" = 2,
+        "var2" = 4,
+        "var3" = "values",
+    }
+]
+```
+Using this array for the variables in `CampaignIterateVariables` will run `3` experiments, each one using one dictionary to assign a value to every variable.
+
+Creating a `CampaignSuite` can be done by initializing it with a list of campaigns.
+```python
+from benchkit.campaign import CampaignSuite
+
+# Add your campaigns here
+campaigns = [...]
+suite = CampaignSuite(campaigns=campaigns)
+```
+This will create a new `CampaignSuite` that will run all of the campaigns inside of `campaigns` when run.
+
+## Running a campaign
+
+To run a campaign, you call the `run` method on that campaign.
+```python
+campaign.run()
+```
+
+If you want to run a campaign suite, you call the `run_suite` method on that suite.
+```python
+suite.run_suite()
+```
+This method also accepts a `parallel` argument, which is `False` by default; when set to `True`, the different campaigns inside of the suite will be run in parallel.
+```python
+suite.run_suite(parallel=True)
+```
+
+You can also call the `print_durations` method on a suite so that, while running, the time each experiment took and the expected time required to finish the campaign suite are printed to the terminal.
+
+## Results
+
+When running a benchmark, all of the results will be put into the `results` folder.
+When `enable_data_dir` is disabled, all files will be placed directly into this folder; otherwise each campaign will get its own folder, and in those folders each variable and run will also be given their own folder with more information about that particular run.
+
+The results are placed inside of `csv` files, with information about the system and benchmark configuration stored as comments, and the actual results stored as data inside of the `csv`.
+
+### Graphs
+
+When making graphs, you are required to enable data directories, this can be done by setting `enable_data_dirs` to `True` when creating the campaign, for more info see [results](#Results).
+
+`benchkit` also allows you to make graphs from the data that is collected.
+To do this you can run `generate_graph` on a finished campaign, or `generate_graphs` on a campaign suite to create a graph for each campaign, both of these methods take the same arguments.
+
+These functions only require a `plot_name` as an argument, which is the name of the [`seaborn`](https://seaborn.pydata.org/) plot that should be generated.
+Afterwards you can pass optional arguments accepted by [`matplotlib`](https://matplotlib.org/), if the value of these arguments is the name of one of your variables (as [given](#Creating a campaign)) then `benchkit` will automatically give the correct values for that variable to [`matplotlib`](https://matplotlib.org/).
+This can be seen in the following example: +```python +suite.generate_graphs(plot_name="lineplot", x="nb_threads", y="duration", hue="elements"); +``` +This example will generate a [line plot](https://seaborn.pydata.org/generated/seaborn.lineplot.html) for every campaign in the given suite where the `x`-axis contains the amount of threads used, the `y`-axis the time it took to finish the experiment, and a different line will be created, with a different color, for each value of the variable `elements`. diff --git a/docs/getting_started.md b/docs/getting_started.md new file mode 100644 index 00000000..b1e47d50 --- /dev/null +++ b/docs/getting_started.md @@ -0,0 +1,54 @@ +# Getting started + +## Installation + +You can install this framework using [pypy](https://pypi.org/project/pybenchkit/): +```bash +pip install pybenchkit +``` + +## Getting started + +In order to run a benchmark, you need a [campaign](campaign.md), and you need a [benchmark](benchmark.md). +Once you have those two things, you can use these to run your benchmark in the following way. + +```python +from benchkit.campaign import CampaignCartesianProduct + +def runCampaign(): + # Define the campaign, here `CampaignCartesianProduct` is used, but you can also use your own campaign + campaign = CampaignCartesianProduct( + name="benchmark", + # Create a new `benchmark` class, you should add your own here. + benchmark=Benchmark(), + # How many times every experiment should be repeated + nb_runs=3, + variables={ + # Vary the amount of threads that is used + "nb_threads": [2, 4, 8], + }, + # Variables that can be used in your benchmark, but always remain constant + constants={}, + debug=False, + gdb=False, + enable_data_dir=False, + pretty={}, + ) + # Run the campaign + campaign.run() +``` + +This will create a new campaign and run it, the results of this campaign will then be stored in the `results` folder. +If you also want the data plotted you can do this by running `campaign.generate_graph(plot_name="name")`, this graph will then also be placed in the `results` folder. +Generating this graph does, however, require some extra Python libraries, these can be installed using the following command: +```python +pip install matplotlib pandas seaborn +``` +For more info about how to use campaigns, and their arguments, see [campaign](campaign.md). 
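+
+As an illustration, for the campaign above you could generate a line plot of the measured duration against the number of threads. This is only a sketch: it assumes that your benchmark reports a `duration` value in its results, and the available plotting arguments are described in [campaign](campaign.md):
+```python
+# Assumes the campaign above has finished running and produced results
+# containing a `duration` column.
+campaign.generate_graph(
+    plot_name="lineplot",
+    x="nb_threads",
+    y="duration",
+)
+```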
+
+### Running the benchmark
+
+To run the benchmark, simply call the Python file in an environment where all of the dependencies have been installed. Assuming that your Python file is called `benchmark.py`, you can start your benchmarks using the following command:
+```bash
+python benchmark.py
+```

From 9b0e07a7c8fe49024d5017740c77f2ccdb3f1776 Mon Sep 17 00:00:00 2001
From: Merlijn
Date: Wed, 12 Mar 2025 18:15:00 +0100
Subject: [PATCH 02/17] docs: Fix typos

---
 docs/benchmark.md |  6 +++---
 docs/campaign.md  | 24 ++++++++++++------------
 2 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/docs/benchmark.md b/docs/benchmark.md
index 6b3d9719..90f7da25 100644
--- a/docs/benchmark.md
+++ b/docs/benchmark.md
@@ -14,7 +14,7 @@ from benchkit.utils.dir import get_curdir
 _bench_src_path = get_curdir(__file__)
 _build_dir = _bench_src_path / "build"
 
-class MyBenchark(Benchmark):
+class MyBenchmark(Benchmark):
     # Init method, setup all of the required variables
     def __init__(
         self,
@@ -58,7 +58,7 @@ class MyBenchark(Benchmark):
         # The variables defined in `get_build_var_names`, which values are given by the campaign
         # TODO: Add your build variables (defined in `get_build_var_names`) here.
         importantBuildVariable,
-        importantVariable
+        importantVariable,
         # The constants given to the campaign
         constants,
         # Holds all of the variables that are given to this method, but not used
@@ -88,7 +88,7 @@ class MyBenchark(Benchmark):
         # The variables defined in `get_run_var_names`, which values are given by the campaign
         # TODO: Add your run variables (defined in `get_run_var_names`) here.
         importantRunVariable,
-        importantVariable
+        importantVariable,
         # The constants given to the campaign
         constants,
         # Holds all of the variables that are given to this method, but not used
diff --git a/docs/campaign.md b/docs/campaign.md
index 9c1079de..beb315da 100644
--- a/docs/campaign.md
+++ b/docs/campaign.md
@@ -61,9 +61,9 @@ The above code snippet shows you how to initialize a campaign, the only thing th
 For `CampaignCartesianProduct` this argument requires a dictionary where each variable that should vary is assigned an array with all of its possible values.
 ```python
 variables = {
-    "var1" = [1, 2, 3],
-    "var2" = [4, 5, 6],
-    "var3" = ["very", "important", "values"]
+    "var1": [1, 2, 3],
+    "var2": [4, 5, 6],
+    "var3": ["very", "important", "values"]
 }
 ```
 Using this dictionary for the variables in `CampaignCartesianProduct` will run `27` experiments, combining the three variables in every possible way.
@@ -72,19 +72,19 @@ For `CampaignIterateVariables` this argument requires an array of dictionaries, ```python variables = [ { - "var1" = 1, - "var2" = 5, - "var3" = "very", + "var1": 1, + "var2": 5, + "var3": "very", }, { - "var1" = 2, - "var2" = 5, - "var3" = "important", + "var1": 2, + "var2": 5, + "var3": "important", }, { - "var1" = 2, - "var2" = 4, - "var3" = "values", + "var1": 2, + "var2": 4, + "var3": "values", } ] ``` From 5ddde38342702a69842a16cb04d2a6d8fc17ce31 Mon Sep 17 00:00:00 2001 From: Merlijn Date: Wed, 12 Mar 2025 18:17:14 +0100 Subject: [PATCH 03/17] docs: Add missing imports to code example --- docs/benchmark.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/docs/benchmark.md b/docs/benchmark.md index 90f7da25..8770017a 100644 --- a/docs/benchmark.md +++ b/docs/benchmark.md @@ -8,6 +8,11 @@ Below is an example implementation of a custom benchmark, for more info see [bui ```python from benchkit.campaign import Benchmark from benchkit.utils.dir import get_curdir +from benchkit.shell.shellasync import AsyncProcess + +import pathlib +import shutil +from typing import Any, Dict, List # The code that should be benchmarked is oftentimes relatively to the location of the current file, which can be gotten using the following method From e2ea5f65d1d8b92a17931b5bde53c493b70a46d4 Mon Sep 17 00:00:00 2001 From: Merlijn Date: Wed, 12 Mar 2025 18:18:01 +0100 Subject: [PATCH 04/17] docs: Break up lines in code examples To not make the code lines be too long --- docs/campaign.md | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/docs/campaign.md b/docs/campaign.md index beb315da..c2810816 100644 --- a/docs/campaign.md +++ b/docs/campaign.md @@ -36,20 +36,25 @@ campaign = CampaignCartesianProduct( benchmark=Benchmark(), # The amount of times each experiment should be ran nb_runs=3, - # The variables that should be used for the experiments, this is the only thing that differs between `CampaignCartesianProduct` and `CampaignIterateVariables` + # The variables that should be used for the experiments, this is the only thing that + # differs between `CampaignCartesianProduct` and `CampaignIterateVariables` variables={ }, - # This is a variable that remains constant throughout all of the experiments that are ran in this campaign - constants={"importantVariable": 5}, + # This is a variable that remains constant throughout all of the experiments that are + # ran in this campaign # Wether or not debugging should be turned on, the actual implementation of the debugging is handled by the benchmark + # Wether or not debugging should be turned on, the actual implementation of the debugging + # is handled by the benchmark debug=False, # Wether or not gdb should be used, the way how gdb is used is handled by the benchmark gdb=False, # Wether to enable data directories for this campaign, see [results](#results) for more info enable_data_dir=False, - # How to pretty print variables, this will replace certain variable values with more meaningful, values. This is only used to print certain variables in different ways. + # How to pretty print variables, this will replace certain variable values with more meaningful, + # values. This is only used to print certain variables in different ways. pretty={"importantVaryingVariable": {5: "five", 6: "six"}}, ## Optional variables - # Can be used to limit the length that an experiment is allowed to run, actually limiting the experiment length should be implemented by the benchmark. 
+ # Can be used to limit the length that an experiment is allowed to run, actually limiting the + # experiment length should be implemented by the benchmark. benchmark_duration_seconds = None, # Set to e.g. 5. # continuing = False From 2621a599b940ffff8637d61a2759de133fc105f4 Mon Sep 17 00:00:00 2001 From: Merlijn Date: Wed, 12 Mar 2025 18:18:29 +0100 Subject: [PATCH 05/17] docs: Change name of constant in example This is to avoid confusion with the examples used in `docs/benchmark.md` --- docs/campaign.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/campaign.md b/docs/campaign.md index c2810816..c43cecc4 100644 --- a/docs/campaign.md +++ b/docs/campaign.md @@ -41,7 +41,7 @@ campaign = CampaignCartesianProduct( variables={ }, # This is a variable that remains constant throughout all of the experiments that are # ran in this campaign - # Wether or not debugging should be turned on, the actual implementation of the debugging is handled by the benchmark + constants={"importantConstant": 5}, # Wether or not debugging should be turned on, the actual implementation of the debugging # is handled by the benchmark debug=False, From a4684a2cf6c6e50fc65ad1d383993eaa0d91067c Mon Sep 17 00:00:00 2001 From: Merlijn Date: Wed, 12 Mar 2025 18:20:05 +0100 Subject: [PATCH 06/17] docs: Change explanation of `constants` argument --- docs/benchmark.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/benchmark.md b/docs/benchmark.md index 8770017a..a2b20fa2 100644 --- a/docs/benchmark.md +++ b/docs/benchmark.md @@ -153,7 +153,7 @@ The `build_bench` function should compile your benchmarking code, and is called * `benchmark_duration_seconds` * How long a single benchmark should take, or `None` * `constants` - * The constants for your benchmark, this is given by the [campaign](campaign.md). + * A dictionary containing the constants of your benchmark, this is given by the [campaign](campaign.md). * The variables returned by `get_build_var_names` * These are the variables that vary between experiments, and are given by the [campaign](campaign.md). If you don't need some of these variables, you can use the `**kwargs` which will contain all of the arguments that you do not specify inside of the function. From 18f099cee8f26e99cfea5b895c005098d1b20f97 Mon Sep 17 00:00:00 2001 From: Merlijn Date: Wed, 12 Mar 2025 18:21:41 +0100 Subject: [PATCH 07/17] docs: Fix incorrect explanation about how `parse_output_results` works --- docs/benchmark.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/benchmark.md b/docs/benchmark.md index a2b20fa2..e5a37b6d 100644 --- a/docs/benchmark.md +++ b/docs/benchmark.md @@ -122,7 +122,8 @@ class MyBenchmark(Benchmark): run_variables: Dict[str, Any], **kwargs, ) -> Dict[str, Any]: - # This assumes that each experiments prints a single line of `csv` code, with all of the information used in that run + # This assumes that each experiments prints lines with the following format `=`, delimited with `;` + # e.g. 
`var1=5;var2="value for var2"` key_seq_values = command_output.strip().split(";") result_dict = dict(map(lambda s: s.split("="), key_seq_values)) return result_dict From 413eec7f5b91b74190d87ebd6af41cbacdfc80cf Mon Sep 17 00:00:00 2001 From: Merlijn Date: Thu, 13 Mar 2025 07:18:41 +0100 Subject: [PATCH 08/17] refactor(benchmark): Return default value from `get_tilt_var_names` Using `tilt` is deprecated, so the `get_tilt_var_names` function should not longer be used, but since, every time it was called, this function returned a `notImplementedError`, this function still had to be overwritten every time you wanted to create a new benchmark class. Returning an empty list by default fixes this. --- benchkit/benchmark.py | 2 +- docs/benchmark.md | 5 ----- 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/benchkit/benchmark.py b/benchkit/benchmark.py index ca70fc85..e0e9ac06 100644 --- a/benchkit/benchmark.py +++ b/benchkit/benchmark.py @@ -178,7 +178,7 @@ def get_tilt_var_names() -> List[str]: Returns: List[str]: the names of the tilt variables. """ - raise NotImplementedError + return [] @staticmethod def _write_to_record_data_dir( diff --git a/docs/benchmark.md b/docs/benchmark.md index e5a37b6d..f0685b66 100644 --- a/docs/benchmark.md +++ b/docs/benchmark.md @@ -52,11 +52,6 @@ class MyBenchmark(Benchmark): # TODO: Add your run variables here return ["importantRunVariable", "importantVariable"] - # Deprecated, but since it is still called inside of the framework, it should still be overwritten, but only an empty array should be returned. - @staticmethod - def get_tilt_var_names() -> List[str]: - return [] - # Build the source code using the values required for the current experiment def build_bench( self, From 7c04fe1c11f542009a7a933dd50a22381f30c90e Mon Sep 17 00:00:00 2001 From: Merlijn Date: Thu, 13 Mar 2025 12:05:08 +0100 Subject: [PATCH 09/17] docs: Add documentation about command wrappers --- docs/README.md | 1 + docs/benchmark.md | 1 + docs/wrappers.md | 168 ++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 170 insertions(+) create mode 100644 docs/wrappers.md diff --git a/docs/README.md b/docs/README.md index af496484..fa975d62 100644 --- a/docs/README.md +++ b/docs/README.md @@ -5,3 +5,4 @@ * [getting started](getting_started.md) * [campaign](campaign.md) * [benchmark](benchmark.md) +* [wrappers](wrappers.md) diff --git a/docs/benchmark.md b/docs/benchmark.md index f0685b66..d2a24c51 100644 --- a/docs/benchmark.md +++ b/docs/benchmark.md @@ -26,6 +26,7 @@ class MyBenchmark(Benchmark): ) -> None: # Init method, calls the `__init__` function of the `Benchmark` class super().__init__( + # See [Command wrappers](https://github.com/open-s4c/benchkit/blob/main/docs/wrappers.md) command_wrappers=(), command_attachments=(), shared_libs=(), diff --git a/docs/wrappers.md b/docs/wrappers.md new file mode 100644 index 00000000..eaf6ae17 --- /dev/null +++ b/docs/wrappers.md @@ -0,0 +1,168 @@ +# Command wrappers + +You can also wrap a [benchmark](benchmark.md) in another command, this can be +useful if you want to reuse a certain tracing command, without +having to redefine it every time. 
+
+To use a wrapper, you have to create your benchmark with the wrapper
+that you want to use. When using the example code given in
+[benchmark](benchmark.md), this can be accomplished by changing the
+`__init__` function to the following:
+```python
+def __init__(
+    self,
+) -> None:
+    # TODO: Add wrappers to this array
+    wrappers = []
+    super().__init__(
+        # The wrappers that should be used for this command
+        command_wrappers=wrappers,
+        command_attachments=(),
+        shared_libs=(),
+        # See [Benchmark hooks](https://github.com/open-s4c/benchkit/blob/main/docs/hooks.md)
+        pre_run_hooks=(),
+        post_run_hooks=(),
+    )
+```
+This will include the wrappers in your benchmark; for more info about the wrappers
+that are included in benchkit, see [Included wrappers](#Included wrappers).
+
+Adding the wrappers to your benchmark will not run them, as your benchmark defines
+how it should be run. To run the benchmark with the wrappers, you have to change the
+`single_run` function to use the included wrappers:
+```python
+# `single_run` method, see [benchmarks](https://github.com/open-s4c/benchkit/blob/main/docs/benchmark.md#Running the benchmark) for more info
+def single_run(
+    self,
+    **kwargs
+) -> str | AsyncProcess:
+    # TODO: add your run command here
+    run_command = [ ]
+
+    # Load the environment, this is determined by the wrappers you have added
+    environment = self._preload_env()
+    # Wrap your command with the wrappers you added to your benchmark
+    wrapped_run_command, wrapped_environment = self._wrap_command(
+        run_command=run_command,
+        environment=environment,
+        **kwargs,
+    )
+
+    # Run the wrapped command
+    output = self.run_bench_command(
+        run_command=run_command,
+        # NOTE: Add the wrapped command here
+        wrapped_run_command=wrapped_run_command,
+        current_dir=_build_dir,
+        environment=environment,
+        # NOTE: Add the wrapped environment here
+        wrapped_environment=wrapped_environment,
+        print_output=True,
+    )
+    return output
+```
+
+Running this will, for most campaigns, create a new file containing the data gathered
+by the wrapper. If you also want to add this data to the output `csv` file, or add it to a
+graph, see [hooks](hooks.md).
+
+> [!NOTE]
+> Most wrappers require `enable_data_dir` to be set to `True` in your [campaign](campaign.md).
+
+## Included wrappers
+
+### perf
+
+Runs the [`perf`](https://perf.wiki.kernel.org) Linux utility on your benchmark.
+This wrapper can be created in the following way:
+```python
+from benchkit.commandwrappers.perf import PerfStatWrap, PerfReportWrap
+
+perfStatWrap = PerfStatWrap(events=["cache-misses"])
+perfReportWrap = PerfReportWrap()
+```
+
+There are two versions of the `perf` wrapper: `PerfStatWrap` will wrap
+your command with the `perf stat` command, while `PerfReportWrap` will
+wrap your command with `perf record` and `perf report`.
+
+When using `PerfStatWrap`, you should also pass the `events` argument;
+this argument decides which PMU events `perf` should record.
+
+`PerfStatWrap` has the following [hooks](hooks.md):
+* `post_run_hook_update_results`
+
+`PerfReportWrap` has the following [hooks](hooks.md):
+* `post_run_hook_flamegraph`
+* `post_run_hook_report`
+
+### ltrace
+
+Runs [ltrace](https://www.ltrace.org/).
+This wrapper can be created in the following way:
+```python
+from benchkit.commandwrappers.ltrace import LtraceWrap
+
+ltraceWrap = LtraceWrap()
+```
+
+### numactl
+
+Runs [numactl](https://github.com/numactl/numactl).
+This wrapper can be created in the following way: +```python +from benchkit.commandwrappers.numactl import NumactlWrap + +numactlWrap = NumactlWrap(membind=membind, local_alloc=local_alloc) +``` + +### strace + +Run strace +This wrapper can be created in the following way: +```python +from benchkit.commandwrappers.strace import StraceWrap + +straceWrap = StraceWrap() +``` + +### taskset + +Runs taskset. +This wrapper can be created in the following way: +```python +from benchkit.commandwrappers.taskset import TasksetWrap + +tasksetWrap = TasksetWrap() +``` + +### tracecmd + +Runs [tracecmd](https://www.trace-cmd.org/). +This wrapper can be created in the following way: +```python +from benchkit.commandwrappers.tracecmd import TraceCmdWrap + +tasksetWrap = TraceCmdWrap() +``` + +### valgrind + +Runs [valgrind](https://valgrind.org/). +This wrapper can be created in the following way: +```python +from benchkit.commandwrappers.valgrind import ValgrindWrapper + +valgrindWrapper = ValgrindWrapper() +``` + +### env + +Adds the environment variables, defined in the wrapped environment, to the command +This wrapper can be created in the following way: +```python +from benchkit.commandwrappers.env import EnvWrap + +envWrap = EnvWrap() +``` From fc39a490afbc8dcb99082cb9dcf2e77807c97ba6 Mon Sep 17 00:00:00 2001 From: Merlijn Date: Thu, 13 Mar 2025 12:41:11 +0100 Subject: [PATCH 10/17] docs: Add documentation about benchmark hooks --- docs/README.md | 1 + docs/benchmark.md | 1 + docs/hooks.md | 62 +++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 64 insertions(+) create mode 100644 docs/hooks.md diff --git a/docs/README.md b/docs/README.md index fa975d62..ec53c7d4 100644 --- a/docs/README.md +++ b/docs/README.md @@ -6,3 +6,4 @@ * [campaign](campaign.md) * [benchmark](benchmark.md) * [wrappers](wrappers.md) +* [hooks](hooks.md) diff --git a/docs/benchmark.md b/docs/benchmark.md index d2a24c51..45589730 100644 --- a/docs/benchmark.md +++ b/docs/benchmark.md @@ -30,6 +30,7 @@ class MyBenchmark(Benchmark): command_wrappers=(), command_attachments=(), shared_libs=(), + # See [Benchmark hooks](https://github.com/open-s4c/benchkit/blob/main/docs/hooks.md) pre_run_hooks=(), post_run_hooks=(), ) diff --git a/docs/hooks.md b/docs/hooks.md new file mode 100644 index 00000000..9284cb04 --- /dev/null +++ b/docs/hooks.md @@ -0,0 +1,62 @@ +# Benchmark hooks + +Hooks are functions that will be called before or after every experiment of a [benchmark](benchmark.md). +This is often used by [wrappers](wrappers.md) to change the way your program is going to run, +or to change the output of your [benchmark](benchmark.md) to include the output of what the +wrapper has added. +Any hook that is added to a [benchmark](benchmark.md) will be ran automatically +by benchkit. 
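+
+As a purely illustrative sketch (the exact arguments that benchkit passes to a hook are an
+assumption here; see the hooks shipped with the [wrappers](wrappers.md), such as
+`post_run_hook_update_results`, for real examples), a post-run hook is just a callable, and
+any dictionary it returns ends up in the results, as described in the sections below:
+```python
+# Assumption: hooks are called with keyword arguments; accepting **kwargs keeps
+# this sketch independent of the exact parameter names benchkit uses.
+def my_post_run_hook(**kwargs):
+    # Values returned here would be merged into the benchmark results.
+    return {"my_extra_column": 42}
+```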
+ +To use a hook, you have to create your benchmark with the hooks that you want to add, +this can be accomplished by changing the `init` function to the following, assuming +you are starting from the example code given in [benchmark](benchmark.md): +```python +def __init__( + self, +) -> None: + # TODO: Add hooks to these arrays + pre_run_hooks = [] + post_run_hooks = [] + super().__init__( + # See [Benchmark wrappers](https://github.com/open-s4c/benchkit/blob/main/docs/benchmark.md) + command_wrappers=(), + command_attachments=(), + shared_libs=(), + # Add the hooks here + pre_run_hooks=pre_run_hooks, + post_run_hooks=post_run_hooks, + ) +``` +To give an example, below is how you would add the `perf` [wrapper](wrappers.md#perf), +with one of its hooks: +```python +from benchkit.commandwrappers.perf import PerfStatWrap + +def __init__( + self, +) -> None: + perfStatWrap = PerfStatWrap(events=["cache-misses"]) + # This will add the results of `perf` to the final results outputted by benchkit + post_run_hooks = [perfStatWrap.post_run_hook_update_results] + + super().__init__( + command_wrappers=[perfStatWrap], + command_attachments=(), + shared_libs=(), + pre_run_hooks=(), + post_run_hooks=post_run_hooks, + ) +``` + +## Pre-run hooks + +A pre-run hook is a function that is called before running the actual experiment, +with all of the variables that are used in your campaign. + +## Post-run hooks + +A post-run hook is a function that is called after running the experiment, +with the results of the experiment, and the data folder. +If the hook returns a dictionary, the values in this dictionary will be added +to the results of your benchmark and will therefore be added to the final `CSV` +file, and you will be able to use their values in [graphs](campaign.md#graphs). From 398a17ef2ca71ba7c0d3ec5deec84ef78b7865f1 Mon Sep 17 00:00:00 2001 From: Merlijn Date: Fri, 14 Mar 2025 08:31:48 +0100 Subject: [PATCH 11/17] docs: Reword usage of `generate_graph` --- docs/campaign.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/docs/campaign.md b/docs/campaign.md index c43cecc4..88e972ef 100644 --- a/docs/campaign.md +++ b/docs/campaign.md @@ -133,10 +133,12 @@ This can look like the following ### Graphs -When making graphs, you are required to enable data directories, this can be done by setting `enable_data_dirs` to `True` when creating the campaign, for more info see [results](#Results). - `benchkit` also allows you to make graphs from the data that is collected. To do this you can run `generate_graph` on a finished campaign, or `generate_graphs` on a campaign suite to create a graph for each campaign, both of these methods take the same arguments. +Calling `generate_graph` on a campaign suite will generate a graph using the combined results of all the campaigns inside of the suite. + +> [!NOTE] +> When making graphs, you are required to enable data directories, this can be done by setting `enable_data_dirs` to `True` when creating the campaign, for more info see [results](#Results). These functions only require a `plot_name` as an argument, which is the name of the [`seaborn`](https://seaborn.pydata.org/) plot that should be generated. Afterwards you can pass optional arguments accepted by [`matplotlib`](https://matplotlib.org/), if the value of these arguments is the name of one of your variables (as [given](#Creating a campaign)) then `benchkit` will automatically give the correct values for that variable to [`matplotlib`](https://matplotlib.org/). 
From b3279eb04057f36fde9258bc94be077e920d3a7f Mon Sep 17 00:00:00 2001 From: Merlijn Date: Fri, 14 Mar 2025 08:32:22 +0100 Subject: [PATCH 12/17] docs: Generate plots without rerunning experiments --- docs/campaign.md | 37 +++++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/docs/campaign.md b/docs/campaign.md index 88e972ef..7a17d3e0 100644 --- a/docs/campaign.md +++ b/docs/campaign.md @@ -147,3 +147,40 @@ This can be seen in the following example: suite.generate_graphs(plot_name="lineplot", x="nb_threads", y="duration", hue="elements"); ``` This example will generate a [line plot](https://seaborn.pydata.org/generated/seaborn.lineplot.html) for every campaign in the given suite where the `x`-axis contains the amount of threads used, the `y`-axis the time it took to finish the experiment, and a different line will be created, with a different color, for each value of the variable `elements`. + +If you want to generate a different plot after finishing your experiments, without rerunning them, you can use the `generate_chart_from_single_csv` function. +This function takes the same arguments as `generate_graph` but with the following extra arguments: +* `csv_pathname` + * Type: `PathType` + * The path to the `CSV` file from which it can read the data +* `output_dir` + * Type: `PathType` + * Default: `"/tmp/figs"` + * The directory in which it should place the new graph +* `prefix` + * Type: `str` + * Default: `""` + * A prefix for the name of the generated file, the eventual filename will be `f"benchkit-{prefix}{timestamp}-{figure_id}.png"` +* `nan_replace` + * Type: `bool` + * Default: `True` + * If `True`, replace all the `None`s in the data with `NaN` +* `process_dataframe` + * Type: `DataframeProcessor` + * Default: `identical_dataframe` + * A function that can modifies the dataframe before using it + +This means you can generate a new graph, based on a given benchmark file without +having to rerun your experiment using the following code: +```python +from benchkit.lwchart import generate_chart_from_single_csv + +generate_chart_from_single_csv( + "results/.csv", + plot_name="histplot", + prefix="important_experiment-" + output_dir="results/", +) +``` +Note that, this graph will not include the name of the campaign that was run, +if you want to add this you have to set the `prefix` argument. From e157a4a749b20e9fe9e8b544ae97ad0859150ba7 Mon Sep 17 00:00:00 2001 From: Merlijn Date: Fri, 14 Mar 2025 13:43:19 +0100 Subject: [PATCH 13/17] docs: Change `matplotlib` to `seaborn` --- docs/campaign.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/campaign.md b/docs/campaign.md index 7a17d3e0..06b65132 100644 --- a/docs/campaign.md +++ b/docs/campaign.md @@ -141,7 +141,7 @@ Calling `generate_graph` on a campaign suite will generate a graph using the com > When making graphs, you are required to enable data directories, this can be done by setting `enable_data_dirs` to `True` when creating the campaign, for more info see [results](#Results). These functions only require a `plot_name` as an argument, which is the name of the [`seaborn`](https://seaborn.pydata.org/) plot that should be generated. -Afterwards you can pass optional arguments accepted by [`matplotlib`](https://matplotlib.org/), if the value of these arguments is the name of one of your variables (as [given](#Creating a campaign)) then `benchkit` will automatically give the correct values for that variable to [`matplotlib`](https://matplotlib.org/). 
+Afterwards you can pass optional arguments accepted by [`seaborn`](https://seaborn.pydata.org/), if the value of these arguments is the name of one of your variables (as [given](#Creating a campaign)) then `benchkit` will automatically give the correct values for that variable to [`seaborn`](https://seaborn.pydata.org/). This can be seen in the following example: ```python suite.generate_graphs(plot_name="lineplot", x="nb_threads", y="duration", hue="elements"); From 1355b0cb134e8b5097c4e0da433c896fd1f6aa23 Mon Sep 17 00:00:00 2001 From: Merlijn Date: Fri, 14 Mar 2025 14:02:40 +0100 Subject: [PATCH 14/17] Revert "refactor(benchmark): Return default value from `get_tilt_var_names`" This reverts commit 413eec7f5b91b74190d87ebd6af41cbacdfc80cf. --- benchkit/benchmark.py | 2 +- docs/benchmark.md | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/benchkit/benchmark.py b/benchkit/benchmark.py index e0e9ac06..ca70fc85 100644 --- a/benchkit/benchmark.py +++ b/benchkit/benchmark.py @@ -178,7 +178,7 @@ def get_tilt_var_names() -> List[str]: Returns: List[str]: the names of the tilt variables. """ - return [] + raise NotImplementedError @staticmethod def _write_to_record_data_dir( diff --git a/docs/benchmark.md b/docs/benchmark.md index 45589730..6c578582 100644 --- a/docs/benchmark.md +++ b/docs/benchmark.md @@ -54,6 +54,11 @@ class MyBenchmark(Benchmark): # TODO: Add your run variables here return ["importantRunVariable", "importantVariable"] + # Deprecated, but since it is still called inside of the framework, it should still be overwritten, but only an empty array should be returned. + @staticmethod + def get_tilt_var_names() -> List[str]: + return [] + # Build the source code using the values required for the current experiment def build_bench( self, From a834a59dd7b0e0c01859206f92707788abd47247 Mon Sep 17 00:00:00 2001 From: Merlijn Date: Wed, 30 Apr 2025 15:08:51 +0200 Subject: [PATCH 15/17] docs: Remove `get_tilt_vars` This method is no longer required starting form commit dcb183e. The documentation mentions that it should be added for people that use an older version. --- docs/benchmark.md | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/docs/benchmark.md b/docs/benchmark.md index 6c578582..b977d185 100644 --- a/docs/benchmark.md +++ b/docs/benchmark.md @@ -54,11 +54,6 @@ class MyBenchmark(Benchmark): # TODO: Add your run variables here return ["importantRunVariable", "importantVariable"] - # Deprecated, but since it is still called inside of the framework, it should still be overwritten, but only an empty array should be returned. - @staticmethod - def get_tilt_var_names() -> List[str]: - return [] - # Build the source code using the values required for the current experiment def build_bench( self, @@ -140,6 +135,14 @@ This benchmark can then be used for a [campaign](campaign.md). > Because of the definition of `parse_output_results` this benchmarking class expects a single output line of `scv` code as extra information. > If this is not what is outputted by your results, either change the output, or change the definition of `parse_output_to_results`. +> [!IMPORTANT] +> Version 0.0.1 uses `get_tilt_var_names` that should be overwritten, this is no longer required starting from commit dcb183e +> ```python +> @staticmethod +> def get_tilt_var_names() -> List[str]: +> return [] +> ``` + ## Building the benchmark To build your benchmark code you have to implement the `get_build_var_names` and `build_bench` functions in your `Benchmark` class. 
From 3c8a4905eac87e38917dae51c6b91d28d382df55 Mon Sep 17 00:00:00 2001 From: Merlijn Date: Wed, 30 Apr 2025 15:30:10 +0200 Subject: [PATCH 16/17] docs: Use `benchkit.quick` in getting started documentation --- docs/getting_started.md | 67 +++++++++++++++++++++-------------------- 1 file changed, 35 insertions(+), 32 deletions(-) diff --git a/docs/getting_started.md b/docs/getting_started.md index b1e47d50..d9d7b39e 100644 --- a/docs/getting_started.md +++ b/docs/getting_started.md @@ -9,42 +9,45 @@ pip install pybenchkit ## Getting started -In order to run a benchmark, you need a [campaign](campaign.md), and you need a [benchmark](benchmark.md). -Once you have those two things, you can use these to run your benchmark in the following way. +> [!IMPORTANT] +> The `benchkit.quick` is not available in version 0.0.1 as it was added in commit `32077d5`, if you a version prior to this commit you should use make a [benchmark](benchmark.md) and a [campaign](campaign.md) -```python -from benchkit.campaign import CampaignCartesianProduct - -def runCampaign(): - # Define the campaign, here `CampaignCartesianProduct` is used, but you can also use your own campaign - campaign = CampaignCartesianProduct( - name="benchmark", - # Create a new `benchmark` class, you should add your own here. - benchmark=Benchmark(), - # How many times every experiment should be repeated - nb_runs=3, - variables={ - # Vary the amount of threads that is used - "nb_threads": [2, 4, 8], - }, - # Variables that can be used in your benchmark, but always remain constant - constants={}, - debug=False, - gdb=False, - enable_data_dir=False, - pretty={}, - ) - # Run the campaign - campaign.run() -``` +In order to run a simple benchmark, you can use the `quick_cmd_campaign` function, this function creates a simple benchmark and allows you to run it. -This will create a new campaign and run it, the results of this campaign will then be stored in the `results` folder. -If you also want the data plotted you can do this by running `campaign.generate_graph(plot_name="name")`, this graph will then also be placed in the `results` folder. -Generating this graph does, however, require some extra Python libraries, these can be installed using the following command: ```python -pip install matplotlib pandas seaborn +from benchkit.quick import quick_cmd_campaign + +def dd_cmd(optpt): + return f"dd if=/dev/zero of=/tmp/tempfile bs={optpt['bs']} count={optpt['count']}" + +optspace = { + "bs": ["4K", "16K", "64K"], + "count": [1000, 5000, 10000], +} + +if __name__ == "__main__": + campaign = quick_cmd_campaign("dd_disk_io", optspace, dd_cmd, nb_runs=3) ``` -For more info about how to use campaigns, and their arguments, see [campaign](campaign.md). +In this case, the `campaign` variable will hold a normal [campaign](campaign.md), to run it and get results from it see [Running a campaign](campaign.md#Running a campaign) and [Results](campaign.md#Results). + +The `quick_cmd_campaign` function accepts the following arguments: +* `name` + * Type: 'str' + * The name of your benchmark +* `options` + * Type: `Dict[str, List[Any]]` + * The options that your benchmark should use, using a Cartesian product of all of the given options, as described in [Creating a campaign](campaign.md#Creating a campaign). +* `benchmark` + * A function that takes all of your options, and returns the string of the command that will be ran. +* `nb_runs` + * Type: `int` + * default: `1` + * The amount of runs that each experiment should be ran. 
+
+The example above benchmarks `dd` while varying two options (`bs` and `count`).
+
+> [!NOTE]
+> Using `benchkit.quick` is only possible for simple benchmarks, as it misses a lot of options; for more complex benchmarks, you should make a [benchmark](benchmark.md) and a [campaign](campaign.md).
 
 ### Running the benchmark

From 5943ac708d4ba35d4402f97f2103cbaac13ba7c7 Mon Sep 17 00:00:00 2001
From: Merlijn
Date: Sat, 3 May 2025 09:32:15 +0200
Subject: [PATCH 17/17] docs: Remove `get_tilt_vars` warning

Remove the warning stating that older versions need to override the
`get_tilt_vars` method, since this is incorrect.

---
 docs/benchmark.md | 8 --------
 1 file changed, 8 deletions(-)

diff --git a/docs/benchmark.md b/docs/benchmark.md
index b977d185..45589730 100644
--- a/docs/benchmark.md
+++ b/docs/benchmark.md
@@ -135,14 +135,6 @@ This benchmark can then be used for a [campaign](campaign.md).
 > Because of the definition of `parse_output_results` this benchmarking class expects a single output line of `scv` code as extra information.
 > If this is not what is outputted by your results, either change the output, or change the definition of `parse_output_to_results`.
 
-> [!IMPORTANT]
-> Version 0.0.1 uses `get_tilt_var_names` that should be overwritten, this is no longer required starting from commit dcb183e
-> ```python
-> @staticmethod
-> def get_tilt_var_names() -> List[str]:
->     return []
-> ```
-
 ## Building the benchmark
 
 To build your benchmark code you have to implement the `get_build_var_names` and `build_bench` functions in your `Benchmark` class.