Implement multimodal models (LLaVA) #3436

Merged · 36 commits · Oct 12, 2023
Commits (36) · showing changes from 1 commit
59aa1ac
WIP: start implementing LLaVA
monatis Oct 2, 2023
0f0e7c6
rm scratch buf for now, will revert after cleanup
monatis Oct 2, 2023
7e9120f
LLaVA image encoder is working. will combine with llama
monatis Oct 2, 2023
d37ed47
Add llava inference code, but it's buggy. debugging
monatis Oct 3, 2023
8690f42
LLaVA is working e2e, needs to optimize memory allocation + cleanup
monatis Oct 7, 2023
94eeac3
Use ggml_allocr + rm unnecessary code
monatis Oct 8, 2023
0c2bd79
fix: crlf -> lf
monatis Oct 8, 2023
204d08b
fix: new line at EoF
monatis Oct 8, 2023
95da79e
fix: trailing whitespace
monatis Oct 8, 2023
2a04d0b
Merge branch 'master' into llava
monatis Oct 8, 2023
444dbce
Add readme
monatis Oct 9, 2023
8af7e21
Update readme
monatis Oct 9, 2023
54495c9
Some cleanup
monatis Oct 9, 2023
9b0ec4d
Are you happy editorconfig?
monatis Oct 9, 2023
8278a73
rm unused batch image preprocessing
monatis Oct 9, 2023
d78e816
rm unused import
monatis Oct 9, 2023
4759bfd
fix: rm designated initializers
monatis Oct 9, 2023
325d240
introduce pad-to-square mode for non-square images
monatis Oct 9, 2023
d75a031
are you happy editorconfig?
monatis Oct 9, 2023
ae01c85
gitignore /llava
monatis Oct 9, 2023
5009ae9
Handle cases where image file does not exist
monatis Oct 9, 2023
96171de
add llava target to Makefile
monatis Oct 9, 2023
d640aae
add support for 13b model variant
monatis Oct 10, 2023
587bde8
Maybe seed is unlucky?
monatis Oct 11, 2023
f1564bb
Merge branch 'master' into llava
monatis Oct 11, 2023
ab21587
Check if apples are compared to apples
monatis Oct 11, 2023
0409ae0
are you happy editorconfig?
monatis Oct 11, 2023
f0f7834
Use temperature = 0.1 by default
monatis Oct 11, 2023
2bc1710
command line: use gpt_params_parse()
monatis Oct 11, 2023
1403d87
Merge master and fix conflicts
monatis Oct 11, 2023
dc913ea
minor
monatis Oct 12, 2023
56ccf97
handle default n_predict
monatis Oct 12, 2023
e9534ea
fix typo
monatis Oct 12, 2023
346e3c1
Merge branch 'master' into llava
ggerganov Oct 12, 2023
4bc5c9c
llava : code formatting, rename files, fix compile warnings
ggerganov Oct 12, 2023
0bd7e69
do not use Wno-cast-qual for MSVC
monatis Oct 12, 2023
Add llava inference code, but it's buggy. debugging
monatis committed Oct 3, 2023
commit d37ed4750f16fcfd42498d5dde9c4b763212e7b6
4 changes: 2 additions & 2 deletions examples/llava/CMakeLists.txt
@@ -7,8 +7,8 @@ if(TARGET BUILD_INFO)
add_dependencies(${TARGET} BUILD_INFO)
endif()

-set(TARGET clip-test)
-add_executable(${TARGET} clip-test.cpp)
+set(TARGET llava)
+add_executable(${TARGET} llava.cpp)
install(TARGETS ${TARGET} RUNTIME)
target_link_libraries(${TARGET} PRIVATE common llama clip ${CMAKE_THREAD_LIBS_INIT})
target_compile_features(${TARGET} PRIVATE cxx_std_11)
29 changes: 0 additions & 29 deletions examples/llava/clip-test.cpp

This file was deleted.

11 changes: 10 additions & 1 deletion examples/llava/clip.cpp
@@ -1120,6 +1120,9 @@ bool clip_image_batch_encode(const clip_ctx * ctx, const int n_threads, const cl
const int projection_dim = hparams.projection_dim;
const float eps = hparams.eps;
int batch_size = imgs->size;
+if(ctx->has_llava_projector) {
+    GGML_ASSERT(batch_size == 1);
+}

auto & buf_compute = ctx->buf_compute;

@@ -1192,7 +1195,7 @@ bool clip_image_batch_encode(const clip_ctx * ctx, const int n_threads, const cl
}

// loop over layers
-for (int il = 0; il < n_layer; il++) {
+for (int il = 0; il < n_layer - 1; il++) {
struct ggml_tensor * cur = embeddings; // embeddings = residual, cur = hidden_states

const size_t nb_q_w = model.layers[il].q_w->nb[0];
@@ -1283,6 +1286,12 @@ bool clip_image_batch_encode(const clip_ctx * ctx, const int n_threads, const cl
output = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, hidden_size, num_positions, batch_size);
embeddings = ggml_mul_mat(ctx0, model.llava_proj_w, embeddings);
output = ggml_add(ctx0, ggml_repeat(ctx0, model.llava_proj_b, embeddings), embeddings);
+output = ggml_reshape_2d(ctx0, output, output->ne[0], output->ne[1]);
+struct ggml_tensor * patches = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, num_patches);
+for (int i = 0; i < num_patches; ++i) {
+    ggml_set_i32_1d(patches, i, i+1);
+}
+output = ggml_get_rows(ctx0, output, patches);
} else {
// get the output of cls token, e.g., 0th index
struct ggml_tensor * cls = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, batch_size);
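
The clip.cpp hunks above wire in the LLaVA projector path of the image encoder: the batch size is asserted to be 1 when a LLaVA projector is present, the layer loop stops one layer early (LLaVA takes features from the penultimate CLIP layer), and the encoder output is projected with llava_proj_w / llava_proj_b into the language model's embedding space. The new ggml_get_rows() call then keeps rows 1..num_patches and drops row 0, the CLS token, so that only patch embeddings are handed to LLaMA. A minimal sketch of that row selection in plain C++ (illustrative only: drop_cls_row and the flat-buffer layout are assumptions, not code from the PR):

// Equivalent of the ggml_get_rows() selection above, on a flat
// (num_positions x hidden) row-major buffer: skip row 0 (CLS), keep the rest.
#include <vector>

static std::vector<float> drop_cls_row(const std::vector<float> & proj,
                                       int num_positions, int hidden) {
    std::vector<float> patches((size_t)(num_positions - 1) * hidden);
    for (int r = 1; r < num_positions; ++r) {
        for (int c = 0; c < hidden; ++c) {
            patches[(size_t)(r - 1) * hidden + c] = proj[(size_t)r * hidden + c];
        }
    }
    return patches;
}
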
223 changes: 223 additions & 0 deletions examples/llava/llava.cpp
@@ -0,0 +1,223 @@
#include <stdio.h>
#include <stdlib.h>
#include <vector>

#include "clip.h"
#include "common.h"
#include "llama.h"


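// Feed precomputed image embeddings into the LLaMA context. The llama_batch
// below is filled positionally: token ids are left null and the embd pointer
// carries n_eval rows of n_embd floats, with positions starting at *n_past.
// Because n_batch is set to N, the loop currently submits all N embeddings in
// a single llama_decode() call.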
static bool eval_image_embd(llama_context * ctx_llama, float * embd, int N, int * n_past) {
int n_embd = llama_n_embd(llama_get_model(ctx_llama));
int n_batch = N; // params.n_batch;

for (int i = 0; i < (int) N; i += n_batch) {
int n_eval = (int) N - i;
if (n_eval > n_batch) {
n_eval = n_batch;
}
llama_batch batch = {int32_t(n_eval), nullptr, (embd+i*n_embd), nullptr, nullptr, nullptr, *n_past, 1, 0, };
if (llama_decode(ctx_llama, batch)) {
fprintf(stderr, "%s : failed to eval\n", __func__);
return false;
}
*n_past += n_eval;
}
return true;
}

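// Text-side helpers: eval_tokens() decodes a token sequence via
// llama_batch_get_one(), eval_id() wraps a single token, and eval_string()
// tokenizes a C string (the trailing `true` requests a leading BOS token)
// before decoding. These are meant to wrap the textual prompt around the
// image embeddings.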
static bool eval_tokens(struct llama_context * ctx_llama, std::vector<llama_token> tokens, int N, int * n_past) {
int n_batch = N;
for (int i = 0; i < (int) tokens.size(); i += n_batch) {
int n_eval = (int) tokens.size() - i;
if (n_eval > n_batch) {
n_eval = n_batch;
}
if (llama_decode(ctx_llama, llama_batch_get_one(&tokens[i], n_eval, *n_past, 0))) {
fprintf(stderr, "%s : failed to eval\n", __func__);
return false;
}
*n_past += n_eval;
}
return true;
}

static bool eval_id(struct llama_context * ctx_llama, int id, int * n_past) {
std::vector<llama_token> tokens;
tokens.push_back(id);
return eval_tokens(ctx_llama, tokens, 1, n_past);
}

static bool eval_string(struct llama_context * ctx_llama, const char* str, int N, int * n_past){
std::string str2 = str;
std::vector<llama_token> embd_inp = ::llama_tokenize(ctx_llama, str2, true);
eval_tokens(ctx_llama, embd_inp, N, n_past);
return true;
}

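// Sample the next token id from the current logits. This follows the standard
// llama.cpp sampling chain: apply params.logit_bias, then either greedy
// sampling (temp <= 0), mirostat v1/v2, or top-k -> tail-free -> typical ->
// top-p -> temperature sampling. The repetition/frequency penalties are still
// commented out at this point in the PR.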
static llama_token sample_id(llama_context * ctx_llama, gpt_params & params) {
// out of user input, sample next token
const float temp = params.temp;
const int32_t top_k = params.top_k <= 0 ? llama_n_vocab(llama_get_model(ctx_llama)) : params.top_k;
const float top_p = params.top_p;
const float tfs_z = params.tfs_z;
const float typical_p = params.typical_p;
// const int32_t repeat_last_n = params.repeat_last_n < 0 ? n_ctx : params.repeat_last_n;
// const float repeat_penalty = params.repeat_penalty;
// const float alpha_presence = params.presence_penalty;
// const float alpha_frequency = params.frequency_penalty;
const int mirostat = params.mirostat;
const float mirostat_tau = params.mirostat_tau;
const float mirostat_eta = params.mirostat_eta;
// const bool penalize_nl = params.penalize_nl;

llama_token id = 0;
{
auto logits = llama_get_logits(ctx_llama);
auto n_vocab = llama_n_vocab(llama_get_model(ctx_llama));

// Apply params.logit_bias map
for (auto it = params.logit_bias.begin(); it != params.logit_bias.end(); it++) {
logits[it->first] += it->second;
}

std::vector<llama_token_data> candidates;
candidates.reserve(n_vocab);
for (llama_token token_id = 0; token_id < n_vocab; token_id++) {
candidates.emplace_back(llama_token_data{token_id, logits[token_id], 0.0f});
}

llama_token_data_array candidates_p = { candidates.data(), candidates.size(), false };

// TODO: Apply penalties
// float nl_logit = logits[llama_token_nl(ctx)];
// auto last_n_repeat = std::min(std::min((int)last_n_tokens.size(), repeat_last_n), n_ctx);
// llama_sample_repetition_penalty(ctx, &candidates_p,
// last_n_tokens.data() + last_n_tokens.size() - last_n_repeat,
// last_n_repeat, repeat_penalty);
// llama_sample_frequency_and_presence_penalties(ctx, &candidates_p,
// last_n_tokens.data() + last_n_tokens.size() - last_n_repeat,
// last_n_repeat, alpha_frequency, alpha_presence);
// if (!penalize_nl) {
// logits[llama_token_nl(ctx)] = nl_logit;
// }

if (temp <= 0) {
// Greedy sampling
id = llama_sample_token_greedy(ctx_llama, &candidates_p);
} else {
if (mirostat == 1) {
static float mirostat_mu = 2.0f * mirostat_tau;
const int mirostat_m = 100;
llama_sample_temp(ctx_llama, &candidates_p, temp);
id = llama_sample_token_mirostat(ctx_llama, &candidates_p, mirostat_tau, mirostat_eta, mirostat_m, &mirostat_mu);
} else if (mirostat == 2) {
static float mirostat_mu = 2.0f * mirostat_tau;
llama_sample_temp(ctx_llama, &candidates_p, temp);
id = llama_sample_token_mirostat_v2(ctx_llama, &candidates_p, mirostat_tau, mirostat_eta, &mirostat_mu);
} else {
// Temperature sampling
llama_sample_top_k(ctx_llama, &candidates_p, top_k, 1);
llama_sample_tail_free(ctx_llama, &candidates_p, tfs_z, 1);
llama_sample_typical(ctx_llama, &candidates_p, typical_p, 1);
llama_sample_top_p(ctx_llama, &candidates_p, top_p, 1);
llama_sample_temp(ctx_llama, &candidates_p, temp);
id = llama_sample_token(ctx_llama, &candidates_p);
}
}
}

return id;
}

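// Sample one token and convert it to a printable piece; end-of-sequence is
// mapped to the literal string "</s>" so the caller can detect it. The token
// is fed back into the context via eval_id() so generation can continue.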
const char * sample(struct llama_context * ctx_llama, gpt_params & params, int * n_past) {
int id = sample_id(ctx_llama, params);
static std::string ret;
if (id == llama_token_eos(ctx_llama)) {
ret = "</s>";
} else {
ret = llama_token_to_piece(ctx_llama, id);
}
eval_id(ctx_llama, id, n_past);
return ret.c_str();
}

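// End-to-end flow: load the CLIP/LLaVA encoder, preprocess and encode the
// image into a flat buffer of patch embeddings (allocated as 256 x 4096
// floats, presumably 256 patches in the 7B model's embedding space), free the
// encoder, then load the LLaMA model and push the embeddings into its context
// with eval_image_embd() before sampling up to max_tgt_len tokens.
// Note: at this commit the checks are still loose (the usage message does not
// exit, and img_path stays uninitialized if no image path is given), and the
// eval_string() calls for the text prompt are commented out below, so only the
// image embeddings are evaluated before sampling.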
int main(int argc, char ** argv) {
gpt_params params;

if (argc < 3) {
printf("usage: %s <path/to/llava-rlhf-qe_k.gguf> <path/to/llava-encoder-f16.gguf> [path/to/an/image.jpg] [a text prompt]\n", argv[0]);
}

params.model = argv[1];
const char * clip_path = argv[2];
const char * img_path;
if (argc >= 4) {
img_path = argv[3];
}

if (argc >= 5) {
params.prompt = argv[4];
}

if (params.prompt.empty()) {
params.prompt = "user: describe the image in detail.\nassistant:";
}


auto ctx_clip = clip_model_load(clip_path, 1);
clip_image_u8 img;
clip_image_f32 img_res;
clip_image_load_from_file(img_path, &img);
clip_image_preprocess(ctx_clip, &img, &img_res);
float * vec = (float *)malloc(4096 * 256 * sizeof(float));
clip_image_encode(ctx_clip, params.n_threads, &img_res, vec, false);
clip_free(ctx_clip);

llama_backend_init(params.numa);

llama_model_params model_params = llama_model_default_params();
// model_params.n_gpu_layers = 99; // offload all layers to the GPU
llama_model * model = llama_load_model_from_file(params.model.c_str(), model_params);
if (model == NULL) {
fprintf(stderr , "%s: error: unable to load model\n" , __func__);
return 1;
}

llama_context_params ctx_params = llama_context_default_params();
ctx_params.seed = 1234;
ctx_params.n_ctx = 2048;
ctx_params.n_threads = params.n_threads;
ctx_params.n_threads_batch = params.n_threads_batch == -1 ? params.n_threads : params.n_threads_batch;
llama_context * ctx_llama = llama_new_context_with_model(model, ctx_params);

if (ctx_llama == NULL) {
fprintf(stderr , "%s: error: failed to create the llama_context\n" , __func__);
return 1;
}

int n_past = 0;
int max_tgt_len = 256;
//eval_string(ctx_llama, params.prompt.c_str(), params.n_batch, &n_past);
eval_image_embd(ctx_llama, vec, 256, &n_past);
//eval_string(ctx_llama, "assistant:", params.n_batch, &n_past);
printf("n_past = %d\n", n_past);

const char* tmp;
for (int i=0; i<max_tgt_len; i++) {
tmp = sample(ctx_llama, params, &n_past);
if (strcmp(tmp, "</s>")==0) break;
printf("%s", tmp);
fflush(stdout);
}
printf("\n");

llama_print_timings(ctx_llama);

llama_free(ctx_llama);
llama_free_model(model);
llama_backend_free();
free(vec);

return 0;
}
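
For reference, the usage string in main() implies an invocation of the form
./llava <path/to/llava-rlhf-qe_k.gguf> <path/to/llava-encoder-f16.gguf> image.jpg "describe the image in detail."
with the language model first, the CLIP encoder second, and an optional image path and prompt. At this commit the example is built only through the CMake target above; the llava Makefile target is added later in commit 96171de.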