
Commit d13d0f6

hparams : initialize arrays (#13728)
ggml-ci
1 parent 8a2afb7

File tree: 3 files changed (+4, -7 lines)


src/llama-hparams.cpp

Lines changed: 0 additions & 4 deletions
@@ -2,10 +2,6 @@
 
 #include "ggml.h"
 
-llama_hparams::llama_hparams() {
-    swa_layers.fill(false);
-}
-
 void llama_hparams::set_swa_pattern(uint32_t n_pattern) {
     for (uint32_t il = 0; il < n_layer; ++il) {
         swa_layers[il] = n_pattern == 0 || (il % n_pattern < (n_pattern - 1));

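For context, the swa_layers pattern computed by set_swa_pattern can be seen in isolation in the sketch below. It is a standalone illustration, not part of the commit: the layer count and printout are assumptions, but the assignment expression is taken verbatim from the hunk above.

```cpp
#include <array>
#include <cstdint>
#include <cstdio>

int main() {
    constexpr uint32_t n_layer   = 9;  // illustrative layer count (assumption)
    constexpr uint32_t n_pattern = 3;  // every 3rd layer is dense (non-SWA)

    std::array<bool, n_layer> swa_layers;
    swa_layers.fill(false);            // explicit init, as this commit now does at load time

    for (uint32_t il = 0; il < n_layer; ++il) {
        // verbatim from llama_hparams::set_swa_pattern above
        swa_layers[il] = n_pattern == 0 || (il % n_pattern < (n_pattern - 1));
        std::printf("layer %u: %s\n", il, swa_layers[il] ? "SWA" : "dense");
    }
    return 0;
}
```

With n_pattern == 3 this prints layers 0 and 1 as SWA and layer 2 as dense, repeating, which matches the comments retained in llama-hparams.h below.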
src/llama-hparams.h

Lines changed: 0 additions & 2 deletions
@@ -145,8 +145,6 @@ struct llama_hparams {
     enum llama_rope_type rope_type = LLAMA_ROPE_TYPE_NONE;
     enum llama_rope_scaling_type rope_scaling_type_train = LLAMA_ROPE_SCALING_TYPE_NONE;
 
-    llama_hparams();
-
     // this value n_pattern means that every nth layer is dense (i.e. non-SWA)
     // note that if n_pattern == 0, all layers are SWA
     // if n_pattern == 1, all layers are dense

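A type-level consequence of dropping this declaration, shown as a minimal sketch (both struct names are hypothetical stand-ins, not from the commit): with no user-provided constructor, the type is trivially default-constructible again, so its array members hold indeterminate values until filled explicitly.

```cpp
#include <array>
#include <type_traits>

struct with_ctor {                 // hypothetical stand-in for the old llama_hparams
    std::array<bool, 4> a;
    with_ctor() { a.fill(false); } // user-provided ctor: no longer trivial
};

struct without_ctor {              // hypothetical stand-in for the new llama_hparams
    std::array<bool, 4> a;         // indeterminate after default construction
};

static_assert(!std::is_trivially_default_constructible_v<with_ctor>);
static_assert( std::is_trivially_default_constructible_v<without_ctor>);

int main() { return 0; }
```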
src/llama-model.cpp

Lines changed: 4 additions & 1 deletion
@@ -463,11 +463,14 @@ void llama_model::load_hparams(llama_model_loader & ml) {
         GGML_ASSERT(hparams.n_expert_used == 0);
     }
 
-    // zero-out the array hparams
     std::fill(hparams.n_head_arr.begin(), hparams.n_head_arr.end(), 0);
     std::fill(hparams.n_head_kv_arr.begin(), hparams.n_head_kv_arr.end(), 0);
     std::fill(hparams.n_ff_arr.begin(), hparams.n_ff_arr.end(), 0);
 
+    std::fill(hparams.rope_sections.begin(), hparams.rope_sections.end(), 0);
+
+    std::fill(hparams.swa_layers.begin(), hparams.swa_layers.end(), 0);
+
     ml.get_key_or_arr(LLM_KV_FEED_FORWARD_LENGTH, hparams.n_ff_arr, hparams.n_layer, false);
     ml.get_key_or_arr(LLM_KV_ATTENTION_HEAD_COUNT, hparams.n_head_arr, hparams.n_layer, false);

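With the constructor gone, these fills become the single place the per-layer arrays get defined values before the loader reads keys into them. A standalone sketch of the pattern, with hypothetical names (hparams_sketch, arrays sized to 8), assuming the members are plain std::array fields as in llama-hparams.h:

```cpp
#include <algorithm>
#include <array>
#include <cstdint>

struct hparams_sketch {                  // hypothetical stand-in for llama_hparams
    std::array<uint32_t, 8> n_head_arr;  // indeterminate after default construction
    std::array<bool, 8>     swa_layers;  // previously zeroed by the removed ctor
};

int main() {
    hparams_sketch hp;                   // no ctor: arrays are NOT zeroed here

    // zero-out the array hparams, mirroring load_hparams() above
    std::fill(hp.n_head_arr.begin(), hp.n_head_arr.end(), 0);
    std::fill(hp.swa_layers.begin(), hp.swa_layers.end(), false);

    return static_cast<int>(hp.n_head_arr[0]) + static_cast<int>(hp.swa_layers[0]); // 0
}
```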