
Commit 5004fdd

[Android] Add API to use new config
1 parent 77c48f7 commit 5004fdd

File tree

1 file changed: +24 -0 lines changed

  • extension/android/executorch_android/src/main/java/org/pytorch/executorch/extension/llm


extension/android/executorch_android/src/main/java/org/pytorch/executorch/extension/llm/LlmModule.java

@@ -59,6 +59,17 @@ public LlmModule(int modelType, String modulePath, String tokenizerPath, float t
     mHybridData = initHybrid(modelType, modulePath, tokenizerPath, temperature, null);
   }
 
+  /** Constructs a LLM Module for a model with given #LlmModuleConfig */
+  public LlmModule(LlmModuleConfig config) {
+    mHybridData =
+        initHybrid(
+            config.modelType,
+            config.modulePath,
+            config.tokenizerPath,
+            config.temperature,
+            config.dataPath);
+  }
+
   public void resetNative() {
     mHybridData.resetNative();
   }
@@ -107,6 +118,19 @@ public int generate(String prompt, int seqLen, LlmCallback llmCallback, boolean
     return generate(null, 0, 0, 0, prompt, seqLen, llmCallback, echo);
   }
 
+  /**
+   * Start generating tokens from the module.
+   *
+   * @param prompt Input prompt
+   * @param config the config for generation
+   * @param llmCallback callback object to receive results
+   */
+  public int generate(String prompt, LlmGenerationConfig config, LlmCallback llmCallback) {
+    int seqLen = config.getSeqLen();
+    boolean echo = config.isEcho();
+    return generate(null, 0, 0, 0, prompt, seqLen, llmCallback, echo);
+  }
+
   /**
    * Start generating tokens from the module.
    *

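For context, a minimal usage sketch of the two APIs this commit adds. It is not part of the commit: how the LlmModuleConfig and LlmGenerationConfig instances are built is not shown in this diff, so the sketch takes them (and the LlmCallback) as parameters and only calls methods visible in the hunks above. The import paths assume the config classes live alongside LlmModule in the same package.

import org.pytorch.executorch.extension.llm.LlmCallback;
import org.pytorch.executorch.extension.llm.LlmGenerationConfig;
import org.pytorch.executorch.extension.llm.LlmModule;
import org.pytorch.executorch.extension.llm.LlmModuleConfig;

public class LlmModuleConfigExample {
  // Runs one generation using the config-based constructor and generate() overload
  // added in this commit. Construction of the configs and callback is assumed to
  // happen elsewhere; it is not part of this diff.
  public static int run(
      LlmModuleConfig moduleConfig, LlmGenerationConfig genConfig, LlmCallback callback) {
    // New constructor: model type, module/tokenizer/data paths, and temperature
    // all come from a single LlmModuleConfig instead of separate arguments.
    LlmModule module = new LlmModule(moduleConfig);
    // New overload: seqLen and echo are read from the LlmGenerationConfig.
    int status = module.generate("Tell me a story", genConfig, callback);
    // resetNative() (unchanged, shown as context above) releases native resources.
    module.resetNative();
    return status;
  }
}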