forked from eliberis/uNAS
-
Notifications
You must be signed in to change notification settings - Fork 0
/
test_load_pickle_sc.py
74 lines (61 loc) · 2.72 KB
/
test_load_pickle_sc.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
import pickle

# Restore the recorded AgingEvoSearch history from disk.  The pickle holds a
# list whose entries were produced from the dataclass:
#   point: ArchitecturePoint
#   val_error: float
#   test_error: float
#   resource_features: List[Union[int, float]]
# NOTE(review): pickle.load must only be used on artifacts you produced
# yourself — it executes arbitrary code on untrusted input.
with open('artifacts/cnn_speech_commands/example_cnn_speech_commands_struct_pru_agingevosearch_state.pickle', 'rb') as f:
    EvaluatedPoint = pickle.load(f)

print("------------------------------")
print("len of EvaluatedPoint:", len(EvaluatedPoint))
print("-----------------------------")
import numpy as np
import tensorflow as tf
from architecture import Architecture
from cnn import CnnSearchSpace  # NOTE(review): imported but never used in this script
from resource_models.models import model_size, peak_memory_usage
# Directory that receives the converted .tflite models and the summary CSV.
output_dir = "tmp/tflite"
# Model input tensor shape: 49x40 single-channel feature map — presumably a
# Speech Commands spectrogram/MFCC window; confirm against the training pipeline.
input_shape = (49, 40, 1)
num_classes = 10  # number of keyword classes in this Speech Commands setup
def get_resource_requirements(arch: Architecture):
    """Return ``(model_size, peak_memory_usage)`` for *arch*.

    The architecture is lowered to a resource graph using the module-level
    ``input_shape`` / ``num_classes``, then measured with the two resource
    models.  Peak memory usage includes the input buffers
    (``exclude_inputs=False``).
    """
    resource_graph = arch.to_resource_graph(input_shape, num_classes)
    size = model_size(resource_graph)
    peak_mem = peak_memory_usage(resource_graph, exclude_inputs=False)
    return size, peak_mem
def convert_to_tflite(arch, output_file):
    """Quantize *arch* to a full-int8 TFLite model and write it to *output_file*.

    A small random representative dataset drives post-training quantization;
    both the input and output tensors are forced to int8.
    """
    keras_model = arch.to_keras_model(input_shape, num_classes)
    keras_model.summary()

    def representative_dataset():
        # Five random samples for the converter's range calibration.
        return [[np.random.random((1,) + input_shape).astype("float32")]
                for _ in range(5)]

    converter = tf.lite.TFLiteConverter.from_keras_model(keras_model)
    converter.optimizations = [tf.lite.Optimize.DEFAULT]
    converter.representative_dataset = representative_dataset
    converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
    converter.inference_input_type = tf.int8
    converter.inference_output_type = tf.int8

    with open(output_file, "wb") as out:
        out.write(converter.convert())
import csv
import os

# Summarize every evaluated architecture into one CSV row.  Accuracies are
# derived from the stored error rates; the resource_features columns are, per
# the header, [peak_memory_usage, model_size, inference_latency].
os.makedirs(output_dir, exist_ok=True)  # "tmp/tflite" may not exist yet
with open(f"{output_dir}/speech_command_EvaluatedPoint_point_arch.csv", "w", newline="") as csvfile:
    wr = csv.writer(csvfile)
    wr.writerow(["id", "val_acc", "test_acc", "peak_memory_usage", "model_size", "inference_latency"])
    # Iterate over the actual history rather than a hard-coded range(0, 2000),
    # which raised IndexError for shorter runs and silently truncated longer ones.
    for i, point in enumerate(EvaluatedPoint):
        row = [i, 1 - point.val_error, 1 - point.test_error]
        row.extend(point.resource_features)
        wr.writerow(row)