
Commit

fix typos
LostRuins committed Jul 3, 2023
1 parent 3d2907d commit bfeb347
Showing 3 changed files with 6 additions and 6 deletions.
4 changes: 2 additions & 2 deletions otherarch/gptj_v3.cpp
@@ -348,7 +348,7 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g
const auto & hparams = model.hparams;
size_t vram_total = 0;
const int n_gpu = std::min(gpulayers, int(hparams.n_layer));
- fprintf(stderr, "%s: [opencl] offloading %d layers to GPU\n", __func__, n_gpu);
+ fprintf(stderr, "%s: [GPU] offloading %d layers to GPU\n", __func__, n_gpu);
for (int i = 0; i < n_gpu; ++i) {
const auto & layer = model.layers[i];
layer.c_attn_q_proj_w->backend = GGML_BACKEND_GPU;
@@ -373,7 +373,7 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g
ggml_cuda_transform_tensor(layer.c_mlp_proj_w->data,layer.c_mlp_proj_w); vram_total += ggml_nbytes(layer.c_mlp_proj_w);
#endif
}
- fprintf(stderr, "%s: [opencl] total VRAM used: %zu MB\n", __func__, vram_total / 1024 / 1024);
+ fprintf(stderr, "%s: [GPU] total VRAM used: %zu MB\n", __func__, vram_total / 1024 / 1024);
}
#endif

4 changes: 2 additions & 2 deletions otherarch/mpt_v3.cpp
@@ -301,7 +301,7 @@ bool mpt_model_load(const std::string & fname, mpt_model & model, gpt_vocab & vo
const auto & hparams = model.hparams;
size_t vram_total = 0;
const int n_gpu = std::min(gpulayers, int(hparams.n_layers));
- fprintf(stderr, "%s: [opencl] offloading %d layers to GPU\n", __func__, n_gpu);
+ fprintf(stderr, "%s: [GPU] offloading %d layers to GPU\n", __func__, n_gpu);
for (int i = 0; i < n_gpu; ++i) {
const auto & layer = model.layers[i];
layer.ffn_up_proj->backend = GGML_BACKEND_GPU;
@@ -320,7 +320,7 @@ bool mpt_model_load(const std::string & fname, mpt_model & model, gpt_vocab & vo
ggml_cuda_transform_tensor(layer.c_attn_out_proj_weight->data,layer.c_attn_out_proj_weight); vram_total += ggml_nbytes(layer.c_attn_out_proj_weight);
#endif
}
- fprintf(stderr, "%s: [opencl] total VRAM used: %zu MB\n", __func__, vram_total / 1024 / 1024);
+ fprintf(stderr, "%s: [GPU] total VRAM used: %zu MB\n", __func__, vram_total / 1024 / 1024);
}
#endif

4 changes: 2 additions & 2 deletions otherarch/neox_v3.cpp
@@ -335,7 +335,7 @@ ModelLoadResult gpt_neox_model_load(const std::string & fname, gpt_neox_model &
const auto & hparams = model.hparams;
size_t vram_total = 0;
const int n_gpu = std::min(gpulayers, int(hparams.n_layer));
- fprintf(stderr, "%s: [opencl] offloading %d layers to GPU\n", __func__, n_gpu);
+ fprintf(stderr, "%s: [GPU] offloading %d layers to GPU\n", __func__, n_gpu);
for (int i = 0; i < n_gpu; ++i) {
const auto & layer = model.layers[i];
layer.c_attn_attn_w->backend = GGML_BACKEND_GPU;
@@ -354,7 +354,7 @@ ModelLoadResult gpt_neox_model_load(const std::string & fname, gpt_neox_model &
ggml_cuda_transform_tensor(layer.c_mlp_proj_w->data,layer.c_mlp_proj_w); vram_total += ggml_nbytes(layer.c_mlp_proj_w);
#endif
}
- fprintf(stderr, "%s: [opencl] total VRAM used: %zu MB\n", __func__, vram_total / 1024 / 1024);
+ fprintf(stderr, "%s: [GPU] total VRAM used: %zu MB\n", __func__, vram_total / 1024 / 1024);
}
#endif

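All three hunks sit inside the same offload routine: clamp n_gpu to min(gpulayers, n_layer), tag each of the first n_gpu layers' weight tensors with GGML_BACKEND_GPU, copy them into VRAM with ggml_cuda_transform_tensor while accumulating ggml_nbytes into vram_total, and report progress via the two fprintf lines whose prefix this commit renames from [opencl] to [GPU]. Below is a minimal self-contained sketch of that shared pattern; SimpleLayer, SimpleModel, offload_layers and the GGML_USE_CUBLAS guard are illustrative assumptions (the real files use their own layer structs, more tensors per layer, and their own build guards) — only the calls visible in the diff are taken from the source.

// Sketch of the layer-offload loop common to gptj_v3.cpp, mpt_v3.cpp and
// neox_v3.cpp. SimpleLayer/SimpleModel/offload_layers are hypothetical
// stand-ins; the GGML_USE_CUBLAS guard is an assumption — the real files
// compile this block under their own GPU build flags.
#include <algorithm>
#include <cstdio>
#include <vector>
#include "ggml.h"
#if defined(GGML_USE_CUBLAS)
#include "ggml-cuda.h"
#endif

struct SimpleLayer { struct ggml_tensor * c_mlp_proj_w; };
struct SimpleModel { int n_layer; std::vector<SimpleLayer> layers; };

static void offload_layers(SimpleModel & model, int gpulayers) {
    size_t vram_total = 0;
    const int n_gpu = std::min(gpulayers, model.n_layer);
    fprintf(stderr, "%s: [GPU] offloading %d layers to GPU\n", __func__, n_gpu);
    for (int i = 0; i < n_gpu; ++i) {
        SimpleLayer & layer = model.layers[i];
        // mark the tensor as GPU-resident ...
        layer.c_mlp_proj_w->backend = GGML_BACKEND_GPU;
#if defined(GGML_USE_CUBLAS)
        // ... then copy its data into VRAM and count the bytes moved
        ggml_cuda_transform_tensor(layer.c_mlp_proj_w->data, layer.c_mlp_proj_w);
        vram_total += ggml_nbytes(layer.c_mlp_proj_w);
#endif
    }
    fprintf(stderr, "%s: [GPU] total VRAM used: %zu MB\n", __func__, vram_total / 1024 / 1024);
}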
