Skip to content

Commit

Permalink
llama-bench : add support for the RPC backend (llama/7435)
Browse files Browse the repository at this point in the history
  • Loading branch information
rgerganov authored and ggerganov committed Jun 15, 2024
1 parent c11302c commit cbe312b
Show file tree
Hide file tree
Showing 2 changed files with 9 additions and 0 deletions.
1 change: 1 addition & 0 deletions include/ggml/ggml.h
Original file line number Diff line number Diff line change
Expand Up @@ -2428,6 +2428,7 @@ extern "C" {
GGML_API int ggml_cpu_has_sse3 (void);
GGML_API int ggml_cpu_has_ssse3 (void);
GGML_API int ggml_cpu_has_sycl (void);
GGML_API int ggml_cpu_has_rpc (void);
GGML_API int ggml_cpu_has_vsx (void);
GGML_API int ggml_cpu_has_matmul_int8(void);

Expand Down
8 changes: 8 additions & 0 deletions src/ggml.c
Original file line number Diff line number Diff line change
Expand Up @@ -22872,6 +22872,14 @@ int ggml_cpu_has_sycl(void) {
#endif
}

// Returns 1 if this build was compiled with the RPC backend
// (GGML_USE_RPC defined), 0 otherwise.
int ggml_cpu_has_rpc(void) {
#ifdef GGML_USE_RPC
    const int has_rpc = 1;
#else
    const int has_rpc = 0;
#endif
    return has_rpc;
}

int ggml_cpu_has_gpublas(void) {
return ggml_cpu_has_cuda() || ggml_cpu_has_clblast() || ggml_cpu_has_vulkan() || ggml_cpu_has_kompute() ||
ggml_cpu_has_sycl();
Expand Down

0 comments on commit cbe312b

Please sign in to comment.