Skip to content

Commit

Permalink
Generalize for each net quantization
Browse files Browse the repository at this point in the history
  • Loading branch information
anson0910 committed Nov 14, 2015
1 parent 3990078 commit 3ed8307
Showing 1 changed file with 13 additions and 7 deletions.
20 changes: 13 additions & 7 deletions face_net_surgery/face_12_cal_quantize_3_to_9.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,9 @@
import numpy as np
import sys

# Model identifiers: these two strings drive every path built below, so the
# same quantization script can be retargeted to a different net by editing
# only these values (the point of this commit's generalization).
modelName = 'face_12_cal'
# Trained weights file for the model above; presumably produced by a prior
# Caffe training run — NOTE(review): confirm it exists under models/{modelName}.
modelFileName = 'face_12_cal_train_iter_400000.caffemodel'

# ================== caffe ======================================
caffe_root = '/home/anson/caffe-master/'  # this file is expected to be in {caffe_root}/examples
# Make the pycaffe bindings importable before any `import caffe` below.
sys.path.insert(0, caffe_root + 'python')
Expand Down Expand Up @@ -38,26 +41,28 @@ def round_number(num, fixedPointList):
return result

# ================== load face12c_full_conv ======================================
MODEL_FILE = '/home/anson/caffe-master/models/face_12_cal/deploy.prototxt'
PRETRAINED = '/home/anson/caffe-master/models/face_12_cal/face_12_cal_train_iter_400000.caffemodel'
MODEL_FILE = '/home/anson/caffe-master/models/' + modelName + '/deploy.prototxt'
PRETRAINED = '/home/anson/caffe-master/models/' + modelName + '/' + modelFileName
caffe.set_mode_gpu()
net = caffe.Net(MODEL_FILE, PRETRAINED, caffe.TEST)
# ============ should be modified for different files ================
params = ['conv1', 'fc2', 'fc3']
# =====================================================================
# fc_params = {name: (weights, biases)}
original_params = {pr: (net.params[pr][0].data, net.params[pr][1].data) for pr in params}

for quantize_bit_num in range(3, 10):
# ================== load file to save quantized parameters =======================
MODEL_FILE = '/home/anson/caffe-master/models/face_12_cal/deploy.prototxt'
PRETRAINED = '/home/anson/caffe-master/models/face_12_cal/face_12_cal_quantize_' \
MODEL_FILE = '/home/anson/caffe-master/models/' + modelName +'/deploy.prototxt'
PRETRAINED = '/home/anson/caffe-master/models/' + modelName + '/' + modelName + '_quantize_' \
+ str(quantize_bit_num) +'.caffemodel'
quantized_model = open(PRETRAINED, 'w')
net_quantized = caffe.Net(MODEL_FILE, PRETRAINED, caffe.TEST)
params_quantized = params
# conv_params = {name: (weights, biases)}
quantized_params = {pr: (net_quantized.params[pr][0].data, net_quantized.params[pr][1].data) for pr in params_quantized}

print "\n============face_12_cal================="
print "\n============" + modelName + "================="

# transplant
for pr, pr_quantized in zip(params, params_quantized):
Expand All @@ -69,6 +74,7 @@ def round_number(num, fixedPointList):
filters_weights = net_quantized.params[k][0].data
filters_bias = net_quantized.params[k][1].data

# ============ should be modified for different files ================
if k == 'conv1':
a_weight = 0
a_bias = -4
Expand All @@ -78,6 +84,7 @@ def round_number(num, fixedPointList):
elif k == 'fc3':
a_weight = -5
a_bias = 0
# =====================================================================

b_weight = quantize_bit_num - 1 - a_weight
b_bias = quantize_bit_num - 1 - a_bias
Expand All @@ -97,7 +104,6 @@ def round_number(num, fixedPointList):
for currentNum in np.nditer(filters_bias, op_flags=['readwrite']):
currentNum[...] = round_number(currentNum[...], biasFixedPointList)

net_quantized.save('/home/anson/caffe-master/models/face_12_cal/face_12_cal_quantize_'
+ str(quantize_bit_num) +'.caffemodel')
net_quantized.save(PRETRAINED)

quantized_model.close()

0 comments on commit 3ed8307

Please sign in to comment.