Commit: concentrate speed

Snailpong committed Dec 15, 2020
1 parent 53dbcbf · commit b1d148b

Showing 5 changed files with 21 additions and 35 deletions.
feature_model.py (30 changes: 15 additions & 15 deletions)

@@ -8,6 +8,7 @@
from matrix_compute import get_gxyz
from filter_func import get_normalized_gaussian
from kmeans_vector import KMeans_Vector
+from preprocessing import get_array_data

import filter_constant as C
@@ -40,9 +41,8 @@ def get_features(patchX, patchY, patchZ, weight):

    trace, fa, mode = get_lamda_u(l1, l2, l3)

-    # return angle_p, angle_t, math.log(trace), fa, mode
-    # return angle_p, angle_t, math.log(trace)/4, fa, mode/2, index1, sign
-    return v1[0], v1[1], v1[2], math.log(trace), fa, mode, index1, sign
+    # return v1[0], v1[1], v1[2], math.log(trace), fa, mode, index1, sign
+    return v1[0], v1[1], v1[2], trace*2, fa, mode, index1, sign


@njit
@@ -72,8 +72,8 @@ def init_buckets(Q_TOTAL):

def k_means_modeling(quantization):

-    with open('./arrays/qua', 'rb') as p:
-        quantization = pickle.load(p)
+    # with open('./arrays/qua', 'rb') as p:
+    #     quantization = pickle.load(p)

    kmeans_angle = KMeans_Vector(n_clusters=C.Q_ANGLE, verbose=True, max_iter=30, n_init=1)
    kmeans_angle.fit(quantization[:, :3])
@@ -84,24 +84,24 @@ def k_means_modeling(quantization):
    return kmeans_angle, kmeans_tensor


-def make_kmeans_model():
+def make_kmeans_model(file_list):
    G_WEIGHT = get_normalized_gaussian()

    MAX_POINTS = 15000000
    patchNumber = 0
    point_space = np.zeros((MAX_POINTS, 6))

-    # for file_idx, file in enumerate(file_list):
-    #     print('\r', end='')
-    #     print('' * 60, end='')
-    #     print('\r Making Point Space: '+ file.split('\\')[-1] + str(MAX_POINTS) + ' patches (' + str(100*patchNumber/MAX_POINTS) + '%)')
+    for file_idx, file in enumerate(file_list):
+        print('\r', end='')
+        print('' * 60, end='')
+        print('\r Making Point Space: '+ file.split('\\')[-1] + str(MAX_POINTS) + ' patches (' + str(100*patchNumber/MAX_POINTS) + '%)')

-    #     im_HR, im_LR = get_array_data(file, training=True)
-    #     im_GX, im_GY, im_GZ = np.gradient(im_LR)
+        im_HR, im_LR = get_array_data(file, training=True)
+        im_GX, im_GY, im_GZ = np.gradient(im_LR)

-    #     point_space, patchNumber = make_point_space(im_LR, im_GX, im_GY, im_GZ, patchNumber, G_WEIGHT, point_space, MAX_POINTS)
-    #     if patchNumber > MAX_POINTS / 2:
-    #         break
+        point_space, patchNumber = make_point_space(im_LR, im_GX, im_GY, im_GZ, patchNumber, G_WEIGHT, point_space, MAX_POINTS)
+        if patchNumber > MAX_POINTS / 2:
+            break

    quantization = point_space[0:patchNumber, :]
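Net effect in this file: get_features now returns the raw tensor trace (scaled by 2) instead of math.log(trace) as its fourth feature, and make_kmeans_model builds its point space from file_list instead of unpickling a cached ./arrays/qua array. The diff only shows kmeans_angle being fit on the first three feature columns; a minimal sketch of how a 6-D feature row would be split between the two models, assuming KMeans_Vector exposes a scikit-learn-style predict, kmeans_tensor is fit on the remaining columns, and buckets are flattened as angle * Q_TENSOR + tensor (none of which is shown in the diff):

import numpy as np
import filter_constant as C

def predict_bucket(kmeans_angle, kmeans_tensor, feature_row):
    # feature_row: (v1[0], v1[1], v1[2], trace*2, fa, mode) per get_features
    f = np.asarray(feature_row, dtype=np.float64).reshape(1, -1)
    angle_id = kmeans_angle.predict(f[:, :3])[0]    # principal-direction part
    tensor_id = kmeans_tensor.predict(f[:, 3:])[0]  # trace/fa/mode part
    return angle_id * C.Q_TENSOR + tensor_id        # assumed bucket layout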
filter_constant.py (2 changes: 1 addition & 1 deletion)

@@ -5,7 +5,7 @@
RESULT_DIR = "./result/"

Q_ANGLE = 3#4
-Q_TENSOR = 341#170#128
+Q_TENSOR = 170#341#170#128

PATCH_SIZE = 11
PATCH_HALF = PATCH_SIZE // 2
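Halving Q_TENSOR halves the total number of filter buckets, assuming Q_TOTAL is the product of the two cluster counts (Q_TOTAL is defined elsewhere in filter_constant.py; only its use in init_buckets and in the h_{R}x_{Q_TOTAL}.npy filename is visible in this commit):

Q_ANGLE = 3
Q_TENSOR = 170
Q_TOTAL = Q_ANGLE * Q_TENSOR  # 510 buckets, down from 3 * 341 = 1023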
test.py (8 changes: 3 additions & 5 deletions)

@@ -10,7 +10,6 @@

import filter_constant as C

-from crop_black import *
from feature_model import *
from filter_func import *
from kmeans_vector import KMeans_Vector
@@ -75,8 +74,6 @@ def make_hr_yz(i1, result_image, im_LR, jS, h, iS):
            if im_LR[i1, j1, k1] == 0:
                continue

-            # patch1 = patch.ravel()
-            # print(iS[cnt][0])
            patch = np.transpose(patch, iS[cnt][0])

            if iS[cnt][1][0] < 0:
@@ -89,8 +86,7 @@ def make_hr_yz(i1, result_image, im_LR, jS, h, iS):
            elif iS[cnt][1][1] < 0 and iS[cnt][1][2] < 0:
                patch = np.flip(patch, axis=0)

-            patch1 = np.append(patch, 1).astype(np.float32)
-            result_image[i1, j1, k1] = np.dot(patch1, h[jS[cnt]])
+            result_image[i1, j1, k1] = (patch * h_comb[jS[cnt]]).sum() + h_bias[jS[cnt]]
            cnt += 1

    return result_image
@@ -109,6 +105,8 @@ def make_hr_yz(i1, result_image, im_LR, jS, h, iS):
G_WEIGHT = get_normalized_gaussian()

h = np.load('./arrays/h_{}x_{}.npy'.format(C.R, C.Q_TOTAL))
+h_comb = h[:, :-1].reshape(h.shape[0], C.PATCH_SIZE, C.PATCH_SIZE, C.PATCH_SIZE)
+h_bias = h[:, -1]
kmeans = load_kmeans_model()

filestart = time.time()
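This file carries the actual speed-up. Each row of h holds PATCH_SIZE³ filter weights plus a trailing bias term; the old inner loop flattened every patch with np.append (a fresh allocation per voxel) before a dot product, while the new code reshapes the weights once at load time and applies them with an elementwise multiply-and-sum. The two forms are numerically equivalent, as this self-contained check sketches (PATCH_SIZE = 11 comes from filter_constant.py; the random arrays are stand-ins for the real filters and patches):

import numpy as np

P = 11
h_row = np.random.rand(P ** 3 + 1).astype(np.float32)  # one bucket: weights + bias
patch = np.random.rand(P, P, P).astype(np.float32)

# Old form: flatten the patch, append a constant 1, dot with the full row.
old = np.dot(np.append(patch, 1).astype(np.float32), h_row)

# New form: weights reshaped once to the patch shape, bias kept separate.
h_comb = h_row[:-1].reshape(P, P, P)
h_bias = h_row[-1]
new = (patch * h_comb).sum() + h_bias

assert np.allclose(old, new, rtol=1e-4)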
train.py (5 changes: 2 additions & 3 deletions)

@@ -9,7 +9,6 @@

import filter_constant as C

-from crop_black import *
from feature_model import *
from filter_func import *
from kmeans_vector import KMeans_Vector
@@ -106,8 +105,8 @@ def train_qv(im_LR, im_HR, w, kmeans, Q, V, count):

G_WEIGHT = get_normalized_gaussian()

-# kmeans = make_kmeans_model()
-kmeans = load_kmeans_model()
+kmeans = make_kmeans_model(file_list)
+# kmeans = load_kmeans_model()

start = time.time()
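Training now rebuilds the k-means model from the training file list on every run instead of loading the cached one, presumably to keep the clusters in sync with the new trace*2 feature. If both paths were worth keeping, a one-line switch would do (REBUILD_KMEANS is a hypothetical flag, not in the source; the two functions are the repo's own):

REBUILD_KMEANS = True  # hypothetical switch, not part of this commit
kmeans = make_kmeans_model(file_list) if REBUILD_KMEANS else load_kmeans_model()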
util.py (11 changes: 0 additions & 11 deletions)

@@ -3,10 +3,7 @@
import pickle
import nibabel as nib

-from crop_black import *
from filter_func import *
-from get_lr import *
-from hashtable import *
from matrix_compute import *
from util import *
@@ -24,14 +21,6 @@ def make_dataset(dir):
    return images


-def ask_save_qv(Q, V, finished_files):
-    try:
-        a = input_timer("\r Enter to save >> ", 10)
-        save_qv(Q, V, finished_files)
-    except TimeoutError as e:
-        pass


def save_qv(Q, V, finished_files, count):
    print('\rSaving QVF...', end='', flush=True)
    np.savez('./arrays/QVF_{}'.format(C.R), Q=Q, V=V, finished_files=np.array(finished_files), count=count)
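Deleting ask_save_qv removes the 10-second interactive save prompt, and with it a latent bug: the helper called save_qv with three arguments while save_qv requires four (count), so an answered prompt would have raised a TypeError. Presumably callers now invoke save_qv directly (the call site is not shown in this diff):

# Direct save, matching save_qv's actual four-argument signature:
save_qv(Q, V, finished_files, count)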
