-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathconstants.py
More file actions
27 lines (21 loc) · 1.73 KB
/
constants.py
File metadata and controls
27 lines (21 loc) · 1.73 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
"""Path and configuration constants for the libsoup backporting-LLM pipeline.

All paths are absolute and machine-specific. The shared base directories are
factored into private `_*` constants so that relocating the project means
editing one line instead of a dozen; every public constant keeps the exact
same value as before.
"""

# Shared base directories (private: not part of the public constant API).
_TRAINING_DIR = "/home/sumsharma/madhur/backporting-llm/training_llm"
_TEST_ENV_DIR = f"{_TRAINING_DIR}/test-environment"

# Input data: PR changelog and upstream patches for the target package.
PR_CHANGELOG_FILE = f"{_TRAINING_DIR}/data/PR-changelog-libsoup.json"
UPSTREAM_PATCH_FILE = f"{_TRAINING_DIR}/data/libsoup-upstream-patch.json"

# Target package identity and local source checkouts.
PACKAGE_NAME = "libsoup"
AZURELINUX_PACKAGE_VERSION = "3.4.4"
AZURELINUX_REPO_PATH = "/home/sumsharma/madhur/azurelinux"
# Extracted source tree of the exact package/version being backported to.
# Derived from PACKAGE_NAME / AZURELINUX_PACKAGE_VERSION so the three
# constants cannot drift apart on a version bump.
PACKAGE_REPO = (
    f"/home/sumsharma/madhur/package_tarballs/{PACKAGE_NAME}/"
    f"{AZURELINUX_PACKAGE_VERSION}/{PACKAGE_NAME}-{AZURELINUX_PACKAGE_VERSION}/"
)

# Scratch file the test harness writes candidate patches to.
PATCH_TEST_FILE = f"{_TEST_ENV_DIR}/test.patch"

# Model selection: uncomment the alternative line to switch models.
# LLM_PATH = "/mnt/codellama/CodeLlama-13b-Instruct-hf"
LLM_PATH = "/llm_mount/Qwen2.5-Coder-32B-Instruct"
FINETUNED_LLM_WEIGHTS = f"{_TRAINING_DIR}/finetuned_weights"
# FINETUNE_MODEL_NAME = "Qwen2.5"
FINETUNE_MODEL_NAME = "gpt-4o"
# FINETUNED_LLM_WEIGHTS = "/home/sumsharma/madhur/codellama-setup/finetune-2/test-output"

# Evaluation outputs and prompt-preparation artifacts.
OUTPUT_RESULT_PATH = f"{_TEST_ENV_DIR}/test-result.json"
BACKPORT_EXAMPLE = f"{_TEST_ENV_DIR}/example-backport.json"
STDOUT_PATH = f"{_TEST_ENV_DIR}/stdout.txt"
TEST_SPLIT_DATASET = f"{_TEST_ENV_DIR}/test-split-dataset.json"
PROMPT_DATA_FILE = f"{_TRAINING_DIR}/prepared_prompts/prompt_data_prepared.json"
PREPARED_PROMPTS = f"{_TRAINING_DIR}/prepared_prompts/prepared_prompts.json"

# Log directories.
LOG_DIR = f"{_TRAINING_DIR}/logs"
FINETUNE_LOG_DIR = f"{_TRAINING_DIR}/finetuning/logs"