Skip to content

Commit

Permalink
POC: Even lower rmse 4-bit Q4_0 quantization
Browse files Browse the repository at this point in the history
Basically, we use two Q4_0 quantizations, each having 16 weights,
to quantize a set of 32 weights. We get two separate scaling
factors, which we store as fp16, ending up using the exact same
5 bits per weight as the current Q4_0.

We end up with an rmse of ~0.00159, so basically the same as
the improved Q4_1. But this should run faster than `Q4_1`
(unless fp16 -> fp32 conversion is somehow very slow).
  • Loading branch information
Kawrakow committed Apr 12, 2023
1 parent 29b83e5 commit 679e1cb
Show file tree
Hide file tree
Showing 3 changed files with 47 additions and 4 deletions.
8 changes: 5 additions & 3 deletions examples/quantize-stats/quantize-stats.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -307,16 +307,18 @@ int main(int argc, char ** argv) {

// loop throught quantization types
//for (int i = 0; i < GGML_TYPE_COUNT; i++) {
for (int i = 1; i < 2; i++) {
for (int i = 0; i < 1; i++) {
if (!params.include_types.empty() && std::find(params.include_types.begin(), params.include_types.end(), i) == params.include_types.end()) {
continue;
}
quantize_fns_t qfns = ggml_internal_get_quantize_fn(i);
if (i < 2 && checkNewQuantization) {
//qfns.quantize_row_q = i == 0 ? kQuantizeQ4_0 : kQuantizeQ4_1;
//qfns.quantize_row_q = i == 0 ? kQuantizeQ4_0 : kQuantizeQ5_1;
qfns.quantize_row_q = i == 0 ? kQuantizeQ4_0 : kQuantizeQ5_1_Fast;
if (i == 1) qfns.dequantize_row_q = kDequantizeQ5_1;
//qfns.quantize_row_q = i == 0 ? kQuantizeQ4_0 : kQuantizeQ5_1_Fast;
//if (i == 1) qfns.dequantize_row_q = kDequantizeQ5_1;
qfns.quantize_row_q = i == 0 ? kQuantizeQ4_0K : kQuantizeQ5_1;
qfns.dequantize_row_q = i == 0 ? kDequantizeQ4_0K : kDequantizeQ5_1;
}
if (qfns.quantize_row_q && qfns.dequantize_row_q) {
if (params.verbose) {
Expand Down
40 changes: 39 additions & 1 deletion ggml_extra.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -369,6 +369,15 @@ void kQuantizeQ4(const float* X, void* buffer, int k, int type) {
std::memcpy(q, &result.second, sizeof(result.second)); q += sizeof(result.second);
std::memcpy(q, &result.first, sizeof(result.first)); q += sizeof(result.first);
for (int k=0; k<QK/2; ++k) q[k] = L[2*k] | (L[2*k+1] << 4);
} else if (type == 4) {
auto scale1 = quanizeRmseK7(QK/2, X, L);
auto scale2 = quanizeRmseK7(QK/2, X+QK/2, L+QK/2);
//printf("scale1 = %g, scale2 = %g\n",scale1,scale2);
auto scale1fp16 = ggml_fp32_to_fp16(scale1);
auto scale2fp16 = ggml_fp32_to_fp16(scale2);
std::memcpy(q, &scale1fp16, sizeof(scale1fp16)); q += sizeof(scale1fp16);
std::memcpy(q, &scale2fp16, sizeof(scale2fp16)); q += sizeof(scale2fp16);
for (int k=0; k<QK/2; ++k) q[k] = (L[2*k] + 8) | ((L[2*k+1] + 8) << 4);
} else {
auto result = type == 2 ? kQuantize1(QK, X, L, tmpX, work, 15) : kQuantize1Fast(QK, X, L, 31);
auto afp16 = ggml_fp32_to_fp16(result.first);
Expand All @@ -390,7 +399,7 @@ void kQuantizeQ4(const float* X, void* buffer, int k, int type) {
}
};

auto bucketSize = type == 0 ? kBucketSize0 : kBucketSize1;
auto bucketSize = type == 0 || type == 4 ? kBucketSize0 : kBucketSize1;
auto y = (char*)buffer;
int nchunk = (k + kChunkSize-1)/kChunkSize;
if (nchunk < 2) {
Expand Down Expand Up @@ -517,4 +526,33 @@ void kDequantizeQ5_1(const void* x, float* y, int k) {
}
}

void kQuantizeQ4_0K(const float* x, void* buffer, int k) {
    // Q4_0K quantization: each 32-weight block is split into two 16-weight
    // Q4_0 sub-blocks, each with its own fp16 scale (same 5 bits/weight as Q4_0).
    // Type tag 4 selects this scheme inside the shared kQuantizeQ4 dispatcher.
    constexpr int kTypeQ4_0K = 4;
    kQuantizeQ4(x, buffer, k, kTypeQ4_0K);
}

void kDequantizeQ4_0K(const void* x, float* y, int k) {
    // Dequantize Q4_0K data produced by kQuantizeQ4_0K.
    //
    // Block layout per QK (=32) weights:
    //   fp16 scale for the first half, fp16 scale for the second half,
    //   then QK/2 bytes of packed 4-bit codes (two codes per byte,
    //   low nibble = even-index weight, high nibble = odd-index weight).
    // Codes were stored with a +8 bias, so subtract 8 before scaling.
    //
    // x: packed quantized input; y: receives k dequantized floats;
    // k: number of weights, must be a multiple of QK.
    assert(k % QK == 0);
    int n = k / QK;
    auto data = (const uint8_t*)x;
    for (int i = 0; i < n; ++i) {
        ggml_fp16_t afp16, bfp16;
        std::memcpy(&afp16, data, sizeof(afp16)); data += sizeof(afp16);
        std::memcpy(&bfp16, data, sizeof(bfp16)); data += sizeof(bfp16);
        const float scale[2] = { ggml_fp16_to_fp32(afp16), ggml_fp16_to_fp32(bfp16) };
        // Two halves of QK/4 bytes each (16 weights per half), one scale per half.
        for (int half = 0; half < 2; ++half) {
            const float s = scale[half];
            // Loop index renamed from `k` (original shadowed the parameter).
            for (int j = 0; j < QK/4; ++j) {
                int8_t l1 = data[j] & 15, l2 = data[j] >> 4;
                l1 -= 8; l2 -= 8;
                *y++ = s*l1; *y++ = s*l2;
            }
            data += QK/4;
        }
    }
}

}
3 changes: 3 additions & 0 deletions ggml_extra.h
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,9 @@ void kQuantizeQ5_1_Fast(const float* GGML_RESTRICT x, void* GGML_RESTRICT y, int
size_t kQuantizeQ5_1H_Fast(const float* GGML_RESTRICT x, void* GGML_RESTRICT y, int k, int64_t* hist);
void kDequantizeQ5_1(const void * GGML_RESTRICT x, float * GGML_RESTRICT y, int k);

void kQuantizeQ4_0K(const float* GGML_RESTRICT x, void* GGML_RESTRICT y, int k);
void kDequantizeQ4_0K(const void * GGML_RESTRICT x, float * GGML_RESTRICT y, int k);

#ifdef __cplusplus
}
#endif

0 comments on commit 679e1cb

Please sign in to comment.