CLN Remove unused private functions in bark.cpp (#109)
PABannier committed Sep 10, 2023
1 parent badd010 commit aa9c968
Showing 1 changed file with 0 additions and 157 deletions.
157 changes: 0 additions & 157 deletions bark.cpp
@@ -2116,160 +2116,3 @@ void bark_free(bark_context * ctx) {

    delete ctx;
}

bool allequal(struct ggml_tensor * a, struct ggml_tensor * b, std::string test_name) {
    assert(a->ne[0] == b->ne[0]);
    assert(a->ne[1] == b->ne[1]);
    assert(a->ne[2] == b->ne[2]);
    assert(a->ne[3] == b->ne[3]);

    assert(a->type == GGML_TYPE_I32);
    assert(b->type == GGML_TYPE_I32);

    int64_t n_violations = 0;

    for (int i = 0; i < a->ne[3]; i++) {
        for (int j = 0; j < a->ne[2]; j++) {
            for (int k = 0; k < a->ne[1]; k++) {
                for (int l = 0; l < a->ne[0]; l++) {
                    int32_t * aval = (int32_t *) (
                        (char *) a->data + i*a->nb[3] + j*a->nb[2] + k*a->nb[1] + l*a->nb[0]);
                    int32_t * bval = (int32_t *) (
                        (char *) b->data + i*b->nb[3] + j*b->nb[2] + k*b->nb[1] + l*b->nb[0]);
                    if (*aval != *bval)
                        n_violations += 1;
                }
            }
        }
    }

    int64_t n_elements = a->ne[0]*a->ne[1]*a->ne[2]*a->ne[3];
    float perc_viol = 100.0f*float(n_violations)/n_elements;

    printf("%s: %s\n", __func__, test_name.c_str());
    printf("%s: %%_viol=%.1f\n", __func__, perc_viol);
    printf("\n");
    return n_violations == 0;
}

bool allclose(struct ggml_tensor * a, struct ggml_tensor * b, float tol, std::string test_name) {
    assert(a->ne[0] == b->ne[0]);
    assert(a->ne[1] == b->ne[1]);
    assert(a->ne[2] == b->ne[2]);
    assert(a->ne[3] == b->ne[3]);

    assert(a->type == GGML_TYPE_F32);
    assert(b->type == GGML_TYPE_F32);

    float max_violation = -INFINITY;
    int64_t n_violations = 0;

    for (int i = 0; i < a->ne[3]; i++) {
        for (int j = 0; j < a->ne[2]; j++) {
            for (int k = 0; k < a->ne[1]; k++) {
                for (int l = 0; l < a->ne[0]; l++) {
                    float * aval = (float *) (
                        (char *) a->data + i*a->nb[3] + j*a->nb[2] + k*a->nb[1] + l*a->nb[0]);
                    float * bval = (float *) (
                        (char *) b->data + i*b->nb[3] + j*b->nb[2] + k*b->nb[1] + l*b->nb[0]);
                    float violation = fabs(*aval - *bval);
                    max_violation = std::max(max_violation, violation);
                    if (violation > tol)
                        n_violations += 1;
                }
            }
        }
    }

    int64_t n_elements = a->ne[0]*a->ne[1]*a->ne[2]*a->ne[3];
    float perc_viol = 100.0f*float(n_violations)/n_elements;

    printf("%s: %s\n", __func__, test_name.c_str());
    printf("%s: max_viol=%.4f; viol=%.1f%% (tol=%.4f)\n", __func__, max_violation, perc_viol, tol);
    printf("\n");
    return n_violations == 0;
}


void read_tensor_from_file_f32(std::ifstream & fin, struct ggml_tensor *t) {
    int32_t n_dims;
    read_safe(fin, n_dims);

    int32_t ne[3] = { 1, 1, 1 };
    for (int i = 0; i < n_dims; i++) { read_safe(fin, ne[i]); }

    assert(t->ne[0] == ne[0]);
    assert(t->ne[1] == ne[1]);
    assert(t->ne[2] == ne[2]);
    assert(t->type == GGML_TYPE_F32);

    for (int i = 0; i < ne[2]; i++) {
        for (int j = 0; j < ne[1]; j++) {
            int offset = i*t->nb[2] + j*t->nb[1];
            fin.read(reinterpret_cast<char *>(t->data) + offset, ne[0]*sizeof(float));
        }
    }
}

void read_tensor_from_file_int32(std::ifstream & fin, struct ggml_tensor *t) {
    int32_t n_dims;
    read_safe(fin, n_dims);

    int32_t ne[3] = { 1, 1, 1 };
    for (int i = 0; i < n_dims; i++) { read_safe(fin, ne[i]); }

    assert(t->ne[0] == ne[0]);
    assert(t->ne[1] == ne[1]);
    assert(t->ne[2] == ne[2]);
    assert(t->type == GGML_TYPE_I32);

    for (int i = 0; i < ne[2]; i++) {
        for (int j = 0; j < ne[1]; j++) {
            int offset = i*t->nb[2] + j*t->nb[1];
            fin.read(reinterpret_cast<char *>(t->data) + offset, ne[0]*sizeof(int32_t));
        }
    }
}

void read_tensor_from_file(std::ifstream & fin, struct ggml_tensor * t) {
    if (t->type == GGML_TYPE_F32) {
        read_tensor_from_file_f32(fin, t);
    } else if (t->type == GGML_TYPE_I32) {
        read_tensor_from_file_int32(fin, t);
    } else {
        throw;
    }
}

void load_gt_tensor(std::string path, struct ggml_tensor * t) {
    auto fin = std::ifstream(path, std::ios::binary);
    if (!fin) {
        fprintf(stderr, "failed to open.");
        throw;
    }
    read_tensor_from_file(fin, t);
}

void print_tensor(struct ggml_tensor * a) {
    for (int i = 0; i < a->ne[3]; i++) {
        for (int j = 0; j < a->ne[2]; j++) {
            for (int k = 0; k < a->ne[1]; k++) {
                for (int l = 0; l < a->ne[0]; l++) {
                    if (a->type == GGML_TYPE_F32) {
                        float * aval = (float *) (
                            (char *) a->data + i*a->nb[3] + j*a->nb[2] + k*a->nb[1] + l*a->nb[0]);
                        printf("%.4f ", *aval);
                    } else if (a->type == GGML_TYPE_I32) {
                        int32_t * aval = (int32_t *) (
                            (char *) a->data + i*a->nb[3] + j*a->nb[2] + k*a->nb[1] + l*a->nb[0]);
                        printf("%d ", *aval);
                    } else {
                        throw;
                    }
                }
                printf("\n");
            }
            printf("\n\n");
        }
    }
}
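
For context, the removed functions are debugging utilities for loading ground-truth tensor dumps from disk and comparing them against computed ggml tensors. A minimal sketch of how such helpers could be combined, assuming a hypothetical BARK_DEBUG guard, dump path, and wrapper function name (illustrative only, not code from the repository):

#ifdef BARK_DEBUG
// Hypothetical debugging snippet: compare computed logits against a reference dump.
// The BARK_DEBUG macro, the dump path, and this wrapper's name are assumptions.
static void check_logits_against_reference(struct ggml_context * ctx0, struct ggml_tensor * logits) {
    // allocate a ground-truth tensor with the same shape and type as the computed logits
    struct ggml_tensor * gt = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, logits->ne[0], logits->ne[1]);

    // fill it from a binary dump produced by a reference implementation
    load_gt_tensor("./debug/logits.bin", gt);

    // element-wise comparison within an absolute tolerance, printing the violation stats
    allclose(gt, logits, 1e-4f, "logits");
}
#endif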
