mnist : cleanup main.cpp
ggerganov committed May 24, 2023
1 parent 1a42ea9 commit d30ef19
Showing 1 changed file with 86 additions and 67 deletions.
examples/mnist/main.cpp (153 changes: 86 additions & 67 deletions)
@@ -2,27 +2,21 @@
 
 #include "common.h"
 
-#include <cassert>
 #include <cmath>
 #include <cstdio>
 #include <cstring>
 #include <ctime>
 #include <fstream>
-#include <map>
 #include <string>
 #include <vector>
-#include <iostream>
-#include <unistd.h>
-#include <time.h>
 #include <algorithm>
 
 // default hparams
 struct mnist_hparams {
-    int32_t n_input = 784;
-    int32_t n_hidden = 500;
+    int32_t n_input   = 784;
+    int32_t n_hidden  = 500;
     int32_t n_classes = 10;
 };
 
-
 struct mnist_model {
     mnist_hparams hparams;
@@ -56,24 +50,21 @@ bool mnist_model_load(const std::string & fname, mnist_model & model) {
     }
 
     auto & ctx = model.ctx;
+
     size_t ctx_size = 0;
 
     {
         const auto & hparams = model.hparams;
 
-        const int n_input = hparams.n_input;
-        const int n_hidden = hparams.n_hidden;
+        const int n_input   = hparams.n_input;
+        const int n_hidden  = hparams.n_hidden;
         const int n_classes = hparams.n_classes;
 
-        // fc1 weight
-        ctx_size += n_input * n_hidden * ggml_type_sizef(GGML_TYPE_F32);
-        // fc1 bias
-        ctx_size += n_hidden * ggml_type_sizef(GGML_TYPE_F32);
+        ctx_size += n_input * n_hidden * ggml_type_sizef(GGML_TYPE_F32); // fc1 weight
+        ctx_size += n_hidden * ggml_type_sizef(GGML_TYPE_F32);           // fc1 bias
 
-        // fc2 weight
-        ctx_size += n_hidden * n_classes * ggml_type_sizef(GGML_TYPE_F32);
-        // fc2 bias
-        ctx_size += n_classes * ggml_type_sizef(GGML_TYPE_F32);
+        ctx_size += n_hidden * n_classes * ggml_type_sizef(GGML_TYPE_F32); // fc2 weight
+        ctx_size += n_classes * ggml_type_sizef(GGML_TYPE_F32);            // fc2 bias
 
         printf("%s: ggml ctx size = %6.2f MB\n", __func__, ctx_size/(1024.0*1024.0));
     }
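
For scale, with the default hparams above and F32 tensors (ggml_type_sizef(GGML_TYPE_F32) is 4 bytes per element), the accounting works out to roughly 1.5 MB, matching the printf output:

    // fc1 weight: 784 * 500 * 4 = 1,568,000 bytes
    // fc1 bias  :       500 * 4 =     2,000 bytes
    // fc2 weight: 500 *  10 * 4 =    20,000 bytes
    // fc2 bias  :        10 * 4 =        40 bytes
    // total     :             ~  1,590,040 bytes  (~1.52 MB)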
@@ -99,25 +90,31 @@ bool mnist_model_load(const std::string & fname, mnist_model & model) {
         int32_t n_dims;
         fin.read(reinterpret_cast<char *>(&n_dims), sizeof(n_dims));
 
-        int32_t ne_weight[2] = { 1, 1 };
-        for (int i = 0; i < n_dims; ++i) {
-            fin.read(reinterpret_cast<char *>(&ne_weight[i]), sizeof(ne_weight[i]));
-        }
+        {
+            int32_t ne_weight[2] = { 1, 1 };
+            for (int i = 0; i < n_dims; ++i) {
+                fin.read(reinterpret_cast<char *>(&ne_weight[i]), sizeof(ne_weight[i]));
+            }
 
-        // FC1 dimensions taken from file, eg. 768x500
-        model.hparams.n_input = ne_weight[0];
-        model.hparams.n_hidden = ne_weight[1];
+            // FC1 dimensions taken from file, e.g. 784x500
+            model.hparams.n_input = ne_weight[0];
+            model.hparams.n_hidden = ne_weight[1];
 
-        model.fc1_weight = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, model.hparams.n_input, model.hparams.n_hidden);
-        fin.read(reinterpret_cast<char *>(model.fc1_weight->data), ggml_nbytes(model.fc1_weight));
+            model.fc1_weight = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, model.hparams.n_input, model.hparams.n_hidden);
+            fin.read(reinterpret_cast<char *>(model.fc1_weight->data), ggml_nbytes(model.fc1_weight));
+            ggml_set_name(model.fc1_weight, "fc1_weight");
+        }
 
-        int32_t ne_bias[2] = { 1, 1 };
-        for (int i = 0; i < n_dims; ++i) {
-            fin.read(reinterpret_cast<char *>(&ne_bias[i]), sizeof(ne_bias[i]));
-        }
+        {
+            int32_t ne_bias[2] = { 1, 1 };
+            for (int i = 0; i < n_dims; ++i) {
+                fin.read(reinterpret_cast<char *>(&ne_bias[i]), sizeof(ne_bias[i]));
+            }
 
-        model.fc1_bias = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, model.hparams.n_hidden);
-        fin.read(reinterpret_cast<char *>(model.fc1_bias->data), ggml_nbytes(model.fc1_bias));
+            model.fc1_bias = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, model.hparams.n_hidden);
+            fin.read(reinterpret_cast<char *>(model.fc1_bias->data), ggml_nbytes(model.fc1_bias));
+            ggml_set_name(model.fc1_bias, "fc1_bias");
+        }
     }
 
     // Read FC2 layer 2
@@ -126,24 +123,32 @@ bool mnist_model_load(const std::string & fname, mnist_model & model) {
         int32_t n_dims;
         fin.read(reinterpret_cast<char *>(&n_dims), sizeof(n_dims));
 
-        int32_t ne_weight[2] = { 1, 1 };
-        for (int i = 0; i < n_dims; ++i) {
-            fin.read(reinterpret_cast<char *>(&ne_weight[i]), sizeof(ne_weight[i]));
-        }
+        {
+            int32_t ne_weight[2] = { 1, 1 };
+            for (int i = 0; i < n_dims; ++i) {
+                fin.read(reinterpret_cast<char *>(&ne_weight[i]), sizeof(ne_weight[i]));
+            }
 
-        // FC1 dimensions taken from file, eg. 10x500
-        model.hparams.n_classes = ne_weight[1];
+            // FC2 dimensions taken from file, e.g. 10x500
+            model.hparams.n_classes = ne_weight[1];
 
-        model.fc2_weight = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, model.hparams.n_hidden, model.hparams.n_classes);
-        fin.read(reinterpret_cast<char *>(model.fc2_weight->data), ggml_nbytes(model.fc2_weight));
+            model.fc2_weight = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, model.hparams.n_hidden, model.hparams.n_classes);
+            fin.read(reinterpret_cast<char *>(model.fc2_weight->data), ggml_nbytes(model.fc2_weight));
+            ggml_set_name(model.fc2_weight, "fc2_weight");
+        }
 
-        int32_t ne_bias[2] = { 1, 1 };
-        for (int i = 0; i < n_dims; ++i) {
-            fin.read(reinterpret_cast<char *>(&ne_bias[i]), sizeof(ne_bias[i]));
-        }
+        {
+            int32_t ne_bias[2] = { 1, 1 };
+            for (int i = 0; i < n_dims; ++i) {
+                fin.read(reinterpret_cast<char *>(&ne_bias[i]), sizeof(ne_bias[i]));
+            }
 
-        model.fc2_bias = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, model.hparams.n_classes);
-        fin.read(reinterpret_cast<char *>(model.fc2_bias->data), ggml_nbytes(model.fc2_bias));
+            model.fc2_bias = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, model.hparams.n_classes);
+            fin.read(reinterpret_cast<char *>(model.fc2_bias->data), ggml_nbytes(model.fc2_bias));
+            ggml_set_name(model.fc2_bias, "fc2_bias");
+        }
     }
 
    fin.close();
 
    return true;
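
As an aside, the loader implies the on-disk layout of each tensor in ggml-model-f32.bin: an int32 dimension count, then one int32 size per dimension, then the raw F32 data. A minimal stand-alone sketch of reading one such record (read_tensor_record is a hypothetical helper written for illustration, not part of ggml or this commit):

    #include <cstdint>
    #include <fstream>
    #include <vector>

    // Read one tensor record: int32 n_dims, then n_dims int32 sizes,
    // then the raw float data, as implied by the loader above.
    static bool read_tensor_record(std::ifstream & fin, std::vector<float> & data) {
        int32_t n_dims = 0;
        fin.read(reinterpret_cast<char *>(&n_dims), sizeof(n_dims));

        int64_t n_elems = 1;
        for (int i = 0; i < n_dims; ++i) {
            int32_t ne = 1; // size of dimension i
            fin.read(reinterpret_cast<char *>(&ne), sizeof(ne));
            n_elems *= ne;
        }

        data.resize(n_elems);
        fin.read(reinterpret_cast<char *>(data.data()), n_elems * sizeof(float));

        return fin.good();
    }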
@@ -153,7 +158,8 @@ bool mnist_model_load(const std::string & fname, mnist_model & model) {
 //
 // - model:     the model
 // - n_threads: number of threads to use
-// - digit:     784 pixel values
+// - digit:     784 pixel values
+//
 // returns 0 - 9 prediction
 int mnist_eval(
         const mnist_model & model,
@@ -177,76 +183,89 @@ int mnist_eval(
 
     struct ggml_tensor * input = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, hparams.n_input);
     memcpy(input->data, digit.data(), ggml_nbytes(input));
+    ggml_set_name(input, "input");
 
     // fc1 MLP = Ax + b
-    ggml_tensor * fc1 = ggml_add(ctx0, ggml_mul_mat(ctx0, model.fc1_weight, input), model.fc1_bias);
+    ggml_tensor * fc1 = ggml_add(ctx0, ggml_mul_mat(ctx0, model.fc1_weight, input),                model.fc1_bias);
     ggml_tensor * fc2 = ggml_add(ctx0, ggml_mul_mat(ctx0, model.fc2_weight, ggml_relu(ctx0, fc1)), model.fc2_bias);
 
     // soft max
-    ggml_tensor * final = ggml_soft_max(ctx0, fc2);
+    ggml_tensor * probs = ggml_soft_max(ctx0, fc2);
 
     // run the computation
-    ggml_build_forward_expand(&gf, final);
+    ggml_build_forward_expand(&gf, probs);
     ggml_graph_compute       (ctx0, &gf);
 
     //ggml_graph_print   (&gf);
     ggml_graph_dump_dot(&gf, NULL, "mnist.dot");
-    float* finalData = ggml_get_data_f32(final);
 
-    int prediction = std::max_element(finalData, finalData + 10) - finalData;
+    const float * probs_data = ggml_get_data_f32(probs);
+
+    const int prediction = std::max_element(probs_data, probs_data + 10) - probs_data;
 
     ggml_free(ctx0);
 
     return prediction;
 }
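
The graph built in mnist_eval is a standard two-layer MLP: probs = softmax(fc2_weight * relu(fc1_weight * x + fc1_bias) + fc2_bias). For reference, a dependency-free sketch of the same forward pass (the names and the row-major weight layout here are illustrative assumptions; the example itself expresses this as a ggml compute graph, whose tensor layout differs):

    #include <algorithm>
    #include <vector>

    // y = W*x + b, with W stored row-major as [n_out][n_in]
    static std::vector<float> linear(const std::vector<float> & W,
                                     const std::vector<float> & b,
                                     const std::vector<float> & x) {
        std::vector<float> y(b);
        for (size_t i = 0; i < y.size(); ++i) {
            for (size_t j = 0; j < x.size(); ++j) {
                y[i] += W[i*x.size() + j] * x[j];
            }
        }
        return y;
    }

    static int predict(const std::vector<float> & fc1_w, const std::vector<float> & fc1_b,
                       const std::vector<float> & fc2_w, const std::vector<float> & fc2_b,
                       const std::vector<float> & digit) {
        std::vector<float> h = linear(fc1_w, fc1_b, digit);
        for (float & v : h) v = std::max(v, 0.0f); // relu
        const std::vector<float> logits = linear(fc2_w, fc2_b, h);
        // softmax is monotonic, so argmax over the logits gives the same
        // prediction as argmax over the softmax probabilities
        return (int)(std::max_element(logits.begin(), logits.end()) - logits.begin());
    }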

 int main(int argc, char ** argv) {
+    srand(time(NULL));
     ggml_time_init();
 
     if (argc != 3) {
         fprintf(stderr, "Usage: %s models/mnist/ggml-model-f32.bin models/mnist/t10k-images.idx3-ubyte\n", argv[0]);
         exit(0);
     }
 
-    int64_t t_load_us = 0;
-
+    uint8_t buf[784];
     mnist_model model;
     std::vector<float> digit;
-    // load the model, load a random test digit, evaluate the model
 
+    // load the model
     {
         const int64_t t_start_us = ggml_time_us();
 
         if (!mnist_model_load(argv[1], model)) {
             fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, "models/ggml-model-f32.bin");
             return 1;
         }
-        auto fin = std::ifstream(argv[2], std::ios::binary);
+
+        const int64_t t_load_us = ggml_time_us() - t_start_us;
+
+        fprintf(stdout, "%s: loaded model in %8.2f ms\n", __func__, t_load_us / 1000.0f);
+    }
+
+    // read a random digit from the test set
+    {
+        std::ifstream fin(argv[2], std::ios::binary);
         if (!fin) {
             fprintf(stderr, "%s: failed to open '%s'\n", __func__, argv[2]);
             return 1;
         }
 
-        unsigned char buf[784];
-        srand(time(NULL));
-        // Seek to a random digit: 16-byte header + 28*28 * (random 0 - 10000)
+        // seek to a random digit: 16-byte header + 28*28 * (random 0 - 10000)
         fin.seekg(16 + 784 * (rand() % 10000));
         fin.read((char *) &buf, sizeof(buf));
+    }
 
+    // render the digit in ASCII
+    {
         digit.resize(sizeof(buf));
 
-        // render the digit in ASCII
-        for(int row = 0; row < 28; row++) {
+        for (int row = 0; row < 28; row++) {
             for (int col = 0; col < 28; col++) {
                 fprintf(stderr, "%c ", (float)buf[row*28 + col] > 230 ? '*' : '_');
-                digit[row*28+col]=((float)buf[row*28+col]);
+                digit[row*28 + col] = ((float)buf[row*28 + col]);
             }
+
             fprintf(stderr, "\n");
         }
-        fprintf(stderr, "\n");
 
-        t_load_us = ggml_time_us() - t_start_us;
+        fprintf(stderr, "\n");
     }
 
-    fprintf(stdout, "Predicted digit is %d\n", mnist_eval(model, 1, digit));
+    fprintf(stdout, "%s: predicted digit is %d\n", __func__, mnist_eval(model, 1, digit));
+
     ggml_free(model.ctx);
 
     return 0;
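
One detail worth spelling out from the digit-reading block: the magic constants come from the MNIST IDX file format. t10k-images.idx3-ubyte starts with a 16-byte header (four big-endian int32 fields: magic, image count, rows, cols) followed by 10,000 images of 28*28 = 784 bytes each, so image k lives at byte offset 16 + 784*k. A small sketch of that arithmetic (mnist_image_offset is a hypothetical helper for illustration):

    // Byte offset of image `index` in an MNIST IDX image file:
    // 16-byte header, then one 28x28 grayscale image (784 bytes) per entry.
    static long mnist_image_offset(int index) {
        const long header_bytes = 16;      // magic, count, rows, cols (4 x int32)
        const long image_bytes  = 28 * 28; // 784 pixels, one byte each
        return header_bytes + image_bytes * index;
    }

    // e.g. fin.seekg(mnist_image_offset(rand() % 10000));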
