Commit 7b96c45 (1 parent: 40296d7)

moved starter config.yml inside the module so it gets installed with pip
2 files changed: +4 -4 lines

llmtune/cli/toolkit.py (1 addition, 1 deletion)

@@ -125,7 +125,7 @@ def generate_config():
     """
     Generate an example `config.yml` file in current directory
     """
-    module_path = Path(llmtune.__file__).parent
+    module_path = Path(llmtune.__file__)
     example_config_path = module_path.parent / EXAMPLE_CONFIG_FNAME
     destination = Path.cwd()
     shutil.copy(example_config_path, destination)
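
For context, a minimal sketch of how the updated generate_config() reads after this change. Only the four body lines touched above appear in the diff; the imports and the value of EXAMPLE_CONFIG_FNAME are assumptions made for illustration.

import shutil
from pathlib import Path

import llmtune

EXAMPLE_CONFIG_FNAME = "config.yml"  # assumed value; the constant is defined elsewhere in the package


def generate_config():
    """
    Generate an example `config.yml` file in current directory
    """
    # llmtune.__file__ points at .../llmtune/__init__.py, so .parent is the
    # installed package directory, where the starter config.yml now lives.
    module_path = Path(llmtune.__file__)
    example_config_path = module_path.parent / EXAMPLE_CONFIG_FNAME
    destination = Path.cwd()
    shutil.copy(example_config_path, destination)

With the file inside the llmtune package directory rather than at the repository root, a plain pip install ships it alongside the code, which is what the commit message refers to.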

config.yml renamed to llmtune/config.yml (3 additions, 3 deletions)

@@ -17,15 +17,15 @@ data:
   prompt_stub:
     >- # Stub to add for training at the end of prompt, for test set or inference, this is omitted; make sure only one variable is present
       {output}
-  test_size: 0.1 # Proportion of test as % of total; if integer then # of samples
-  train_size: 0.9 # Proportion of train as % of total; if integer then # of samples
+  test_size: 25 # Proportion of test as % of total; if integer then # of samples
+  train_size: 500 # Proportion of train as % of total; if integer then # of samples
   train_test_split_seed: 42
 
 # Model Definition -------------------
 model:
   hf_model_ckpt: "NousResearch/Llama-2-7b-hf"
   torch_dtype: "bfloat16"
-  attn_implementation: "flash_attention_2"
+  #attn_implementation: "flash_attention_2"
   quantize: true
   bitsandbytes:
     load_in_4bit: true
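
The switch from 0.1/0.9 to 25/500 relies on the convention stated in the config comments: float values are proportions of the dataset, integer values are absolute sample counts. A small illustration of that convention using scikit-learn's train_test_split; treating that as the underlying splitter is an assumption, it is not shown in this diff.

from sklearn.model_selection import train_test_split  # assumed splitter, for illustration only

rows = list(range(1_000))  # stand-in for the loaded prompt/output pairs

# Integer values, as in the new config, are absolute sample counts.
train_rows, test_rows = train_test_split(
    rows,
    train_size=500,   # 500 training samples
    test_size=25,     # 25 test samples
    random_state=42,  # train_test_split_seed
)
assert len(train_rows) == 500 and len(test_rows) == 25

# Float values, as in the old config, are proportions of the whole dataset.
train_rows, test_rows = train_test_split(rows, train_size=0.9, test_size=0.1, random_state=42)
assert len(train_rows) == 900 and len(test_rows) == 100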
