
Commit 0dfbbdc ("added docs")
Parent: 9ae6f41

4 files changed (+14, -4 lines)

llama-cpp-2/src/context/session.rs

Lines changed: 9 additions & 1 deletion
```diff
@@ -44,6 +44,10 @@ impl LlamaContext<'_> {
     ///
     /// * `path_session` - The file to save to.
     /// * `tokens` - The tokens to associate the session with. This should be a prefix of a sequence of tokens that the context has processed, so that the relevant KV caches are already filled.
+    ///
+    /// # Errors
+    ///
+    /// Fails if the path is not a valid utf8, is not a valid c string, or llama.cpp fails to save the session file.
     pub fn save_session_file(
         &self,
         path_session: impl AsRef<Path>,
@@ -77,6 +81,10 @@ impl LlamaContext<'_> {
     ///
     /// * `path_session` - The file to load from. It must be a session file from a compatible context, otherwise the function will error.
     /// * `max_tokens` - The maximum token length of the loaded session. If the session was saved with a longer length, the function will error.
+    ///
+    /// # Errors
+    ///
+    /// Fails if the path is not a valid utf8, is not a valid c string, or llama.cpp fails to load the session file. (e.g. the file does not exist, is not a session file, etc.)
     pub fn load_session_file(
         &mut self,
         path_session: impl AsRef<Path>,
@@ -95,7 +103,7 @@ impl LlamaContext<'_> {
         if llama_cpp_sys_2::llama_load_session_file(
             self.context.as_ptr(),
             cstr.as_ptr(),
-            tokens.as_mut_ptr() as *mut i32,
+            tokens.as_mut_ptr().cast::<i32>(),
             max_tokens,
             &mut n_out,
         ) {
```
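
For orientation, here is a hedged sketch of how the two documented methods pair up. The token-slice argument, the `Vec<LlamaToken>` return of `load_session_file`, and the error plumbing are assumptions inferred from the doc comments and the `n_out` out-parameter above, not confirmed signatures.

```rust
use std::error::Error;
use std::path::Path;

use llama_cpp_2::context::LlamaContext;
use llama_cpp_2::token::LlamaToken;

/// Hypothetical round trip over the methods documented above. Assumes
/// `tokens` is a prefix of what `ctx` has already processed, so the
/// relevant KV caches are filled before saving.
fn roundtrip_session(
    ctx: &mut LlamaContext,
    tokens: &[LlamaToken],
    path: &Path,
) -> Result<Vec<LlamaToken>, Box<dyn Error>> {
    // Fails if the path is not valid UTF-8 / not a valid C string, or
    // if llama.cpp cannot write the session file.
    ctx.save_session_file(path, tokens)?;
    // Fails if the file is missing, is not a session file, or holds
    // more tokens than the cap passed as `max_tokens`.
    let restored = ctx.load_session_file(path, tokens.len())?;
    Ok(restored)
}
```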

llama-cpp-2/src/llama_backend.rs

Lines changed: 1 addition & 1 deletion
```diff
@@ -64,7 +64,7 @@ impl LlamaBackend {
     pub fn init_numa(strategy: NumaStrategy) -> crate::Result<LlamaBackend> {
         Self::mark_init()?;
         unsafe {
-            llama_cpp_sys_2::llama_numa_init(llama_cpp_sys_2::ggml_numa_strategy::from(strategy))
+            llama_cpp_sys_2::llama_numa_init(llama_cpp_sys_2::ggml_numa_strategy::from(strategy));
         }
         Ok(LlamaBackend {})
     }
```
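
The added semicolon makes explicit that the return value of `llama_numa_init` is discarded. From the caller's side the wrapper is used roughly as below; `NumaStrategy::DISTRIBUTE` is an assumed variant name mirroring ggml's NUMA strategies and may differ in your crate version.

```rust
use llama_cpp_2::llama_backend::{LlamaBackend, NumaStrategy};

// Hedged sketch: initialize the backend once per process, with a NUMA
// strategy, before loading any model.
fn init_backend() -> llama_cpp_2::Result<LlamaBackend> {
    LlamaBackend::init_numa(NumaStrategy::DISTRIBUTE)
}
```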

llama-cpp-2/src/model.rs

Lines changed: 2 additions & 0 deletions
```diff
@@ -305,6 +305,8 @@ impl LlamaModel {
     /// # Errors
     ///
     /// There is many ways this can fail. See [`LlamaContextLoadError`] for more information.
+    // we intentionally do not derive Copy on `LlamaContextParams` to allow llama.cpp to change the type to be non-trivially copyable.
+    #[allow(clippy::needless_pass_by_value)]
     pub fn new_context(
         &self,
         _: &LlamaBackend,
```
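
The lint allowance is needed because `new_context` takes `LlamaContextParams` by value even though it could borrow it; keeping the move leaves llama.cpp free to make the type non-trivially copyable later. A hedged call-site sketch, where the `Default` impl and the error handling are assumptions:

```rust
use llama_cpp_2::context::params::LlamaContextParams;
use llama_cpp_2::context::LlamaContext;
use llama_cpp_2::llama_backend::LlamaBackend;
use llama_cpp_2::model::LlamaModel;

// Hedged sketch of the by-value call the allowance covers.
fn make_context<'m>(
    model: &'m LlamaModel,
    backend: &LlamaBackend,
) -> Result<LlamaContext<'m>, Box<dyn std::error::Error>> {
    let params = LlamaContextParams::default();
    // `params` is moved here. Since `LlamaContextParams` does not
    // derive `Copy`, reusing it after this call would not compile.
    Ok(model.new_context(backend, params)?)
}
```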

simple/src/main.rs

Lines changed: 2 additions & 2 deletions
```diff
@@ -58,7 +58,7 @@ enum Model {
 
 impl Model {
     /// Convert the model to a path - may download from huggingface
-    fn as_path(self) -> Result<PathBuf> {
+    fn get_or_load(self) -> Result<PathBuf> {
         match self {
             Model::Local { path } => Ok(path),
             Model::HuggingFace { model, repo } => ApiBuilder::new()
@@ -97,7 +97,7 @@ fn main() -> Result<()> {
     };
 
     let model_path = model
-        .as_path()
+        .get_or_load()
         .with_context(|| "failed to get model from args")?;
 
     let model = LlamaModel::load_from_file(&backend, model_path, &model_params)
```
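
For context, a condensed sketch of the renamed helper: `get_or_load` signals that the HuggingFace arm may download the file (or serve it from the hf-hub cache) rather than merely map to a path. The enum field layout and the exact `hf_hub` call chain are assumptions based on the visible hunk.

```rust
use std::path::PathBuf;

use anyhow::Result;
use hf_hub::api::sync::ApiBuilder;

enum Model {
    Local { path: PathBuf },
    HuggingFace { repo: String, model: String },
}

impl Model {
    /// Resolve the model to a local path - may download from huggingface.
    fn get_or_load(self) -> Result<PathBuf> {
        match self {
            Model::Local { path } => Ok(path),
            // Downloads on first use, then serves from the local cache.
            Model::HuggingFace { repo, model } => Ok(ApiBuilder::new()
                .with_progress(true)
                .build()?
                .model(repo)
                .get(&model)?),
        }
    }
}
```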
