forked from ibpsa/project1-boptest
-
Notifications
You must be signed in to change notification settings - Fork 1
/
utilities.py
932 lines (776 loc) · 38.1 KB
/
utilities.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
# -*- coding: utf-8 -*-
"""
This module contains testing utilities used throughout test scripts, including
common functions and partial classes.
"""
import os
import requests
import unittest
import numpy as np
import json
import pandas as pd
import re
import matplotlib.pyplot as plt
def get_root_path():
    '''Returns the path to the root repository directory.

    This file lives in ``<root>/testing``, so the root is one directory
    level up from the directory containing this module.

    Returns
    -------
    root_path : str
        Absolute path to the root repository directory.

    '''

    testing_path = os.path.dirname(os.path.realpath(__file__))
    root_path = os.path.split(testing_path)[0]

    return root_path
def clean_up(dir_path):
    '''Cleans up the .fmu, .mo, .txt, .mat, .json files from directory.

    Files with any other extension are left in place.

    Parameters
    ----------
    dir_path : str
        Directory path to clean up

    '''

    # str.endswith accepts a tuple of suffixes, so a single call replaces
    # the chain of or'd endswith() checks.
    extensions = ('.fmu', '.mo', '.txt', '.mat', '.json')
    for f in os.listdir(dir_path):
        if f.endswith(extensions):
            os.remove(os.path.join(dir_path, f))
def run_tests(test_file_name):
    '''Run tests and save results for specified test file.

    Results are summarized in a json document and written to a ``.log``
    file named after the test file in the testing directory.

    Parameters
    ----------
    test_file_name : str
        Test file name (ends in .py)

    '''

    # Load tests
    test_loader = unittest.TestLoader()
    suite = test_loader.discover(os.path.join(get_root_path(), 'testing'), pattern=test_file_name)
    num_cases = suite.countTestCases()
    # Run tests
    print('\nFound {0} tests to run in {1}.\n\nRunning...'.format(num_cases, test_file_name))
    result = unittest.TextTestRunner(verbosity=1).run(suite)
    # Parse and save results
    num_failures = len(result.failures)
    num_errors = len(result.errors)
    num_passed = num_cases - num_errors - num_failures
    log_json = {'TestFile': test_file_name,
                'NCases': num_cases,
                'NPassed': num_passed,
                'NErrors': num_errors,
                'NFailures': num_failures,
                'Failures': {},
                'Errors': {}}
    # Store the traceback text of each failure and error, keyed by index.
    for i, failure in enumerate(result.failures):
        log_json['Failures'][i] = failure[1]
    for i, error in enumerate(result.errors):
        log_json['Errors'][i] = error[1]
    # Write the summary next to the test file in the testing directory.
    log_file = os.path.splitext(test_file_name)[0] + '.log'
    with open(os.path.join(get_root_path(), 'testing', log_file), 'w') as f:
        json.dump(log_json, f)
def compare_references(vars_timeseries = ['reaTRoo_y'],
                       refs_old = 'multizone_residential_hydronic_old',
                       refs_new = 'multizone_residential_hydronic'):
    '''Method to perform visual inspection on how references have changed
    with respect to a previous version.

    Walks the old reference folder, pairs each csv with its counterpart in
    the new folder, and plots old vs. new data for comparison.  Files that
    are missing, not csv, or of unknown format are reported and skipped.

    Parameters
    ----------
    vars_timeseries : list
        List with strings indicating the variables to be plotted in time
        series graphs.
        NOTE(review): mutable default argument; safe here because the list
        is only iterated, never mutated.
    refs_old : str
        Name of the folder containing the old references.
    refs_new : str
        Name of the folder containing the new references.

    '''

    dir_old = os.path.join(get_root_path(), 'testing', 'references', refs_old)
    for subdir, _, files in os.walk(dir_old):
        for filename in files:
            f_old = os.path.join(subdir, filename)
            # Counterpart file in the new references tree.
            f_new = os.path.join(subdir.replace(refs_old,refs_new), filename)
            if not os.path.exists(f_new):
                print('File: {} has not been compared since it does not exist anymore.'.format(f_new))
            elif not f_old.endswith('.csv'):
                print('File: {} has not been compared since it is not a csv file.'.format(f_old))
            else:
                df_old = pd.read_csv(f_old)
                df_new = pd.read_csv(f_new)
                # References are either timeseries ('time' column) or
                # key/value tables ('keys' column); anything else is skipped.
                if not('time' in df_old.columns or 'keys' in df_old.columns):
                    print('File: {} has not been compared because the format is not recognized.'.format(f_old))
                else:
                    if 'time' in df_old.columns:
                        # Timeseries: drop the time column and plot as lines.
                        df_old.drop('time', axis=1, inplace=True)
                        df_new.drop('time', axis=1, inplace=True)
                        kind = 'line'
                        vars_to_plot = vars_timeseries
                    elif 'keys' in df_old.columns:
                        # Key/value table: index by key and plot as bars.
                        df_old = df_old.set_index('keys')
                        df_new = df_new.set_index('keys')
                        kind = 'bar'
                        vars_to_plot = df_old.columns
                    if 'kpis_' in filename:
                        # KPI files get one bar subplot per KPI, with old and
                        # new values overlaid at the same x position.
                        fig, axs = plt.subplots(nrows=1, ncols=len(df_old.index), figsize=(10,8))
                        for i,k in enumerate(df_old.index):
                            axs[i].bar(0, df_old.loc[k,'value'], label='old', alpha=0.5, color='orange')
                            axs[i].bar(0, df_new.loc[k,'value'], label='new', alpha=0.5, color='blue')
                            axs[i].set_title(k)
                        fig.suptitle(str(f_new))
                        plt.legend()
                    else:
                        # Other files: one figure per requested variable that
                        # is actually present in the old data.
                        if any([v in df_old.keys() for v in vars_to_plot]):
                            for v in vars_to_plot:
                                if v in df_old.keys():
                                    _, ax = plt.subplots(1, figsize=(10,8))
                                    df_old[v].plot(ax=ax, label='old '+v, kind=kind, alpha=0.5, color='orange')
                                    df_new[v].plot(ax=ax, label='new '+v, kind=kind, alpha=0.5, color='blue')
                                    ax.set_title(str(f_new))
                                    ax.legend()
                        else:
                            print('File: {} has not been compared because it does not contain any of the variables to plot'.format(f_old))
    # Show all generated figures at once for visual inspection.
    plt.show()
class partialChecks(object):
    '''This partial class implements common ref data check methods.

    It is intended to be mixed into unittest.TestCase subclasses, which
    provide the assert* methods used by the compare_* methods here.

    '''

    def compare_ref_timeseries_df(self, df, ref_filepath):
        '''Compare a timeseries dataframe to a reference csv.

        If the reference file does not exist yet, the test data is saved
        as the new reference instead of being checked.

        Parameters
        ----------
        df : pandas DataFrame
            Test dataframe with "time" as index.
        ref_filepath : str
            Reference file path relative to testing directory.

        Returns
        -------
        None

        '''

        # Check time is index
        assert(df.index.name == 'time')
        # Perform test
        if os.path.exists(ref_filepath):
            # If reference exists, check it
            df_ref = pd.read_csv(ref_filepath, index_col='time')
            # Check all keys in reference are in test
            for key in df_ref.columns.to_list():
                self.assertTrue(key in df.columns.to_list(), 'Reference key {0} not in test data.'.format(key))
            # Check all keys in test are in reference
            for key in df.columns.to_list():
                self.assertTrue(key in df_ref.columns.to_list(), 'Test key {0} not in reference data.'.format(key))
            # Check trajectories on a common interpolated grid so that
            # slightly different event times do not fail the comparison.
            for key in df.columns:
                y_test = self.create_test_points(df[key]).to_numpy()
                y_ref = self.create_test_points(df_ref[key]).to_numpy()
                results = self.check_trajectory(y_test, y_ref)
                self.assertTrue(results['Pass'], '{0} Key is {1}.'.format(results['Message'], key))
        else:
            # Otherwise, save as reference
            df.to_csv(ref_filepath)

        return None

    def compare_ref_json(self, json_test, ref_filepath):
        '''Compare a json to a reference json saved as .json.

        If the reference file does not exist yet, the test json is saved
        as the new reference instead of being checked.

        Parameters
        ----------
        json_test : Dict
            Test json in the form of a dictionary.
        ref_filepath : str
            Reference .json file path relative to testing directory.

        Returns
        -------
        None

        '''

        # Perform test
        if os.path.exists(ref_filepath):
            # If reference exists, check it
            with open(ref_filepath, 'r') as f:
                json_ref = json.load(f)
            self.assertTrue(json_test==json_ref, 'json_test:\n{0}\ndoes not equal\njson_ref:\n{1}'.format(json_test, json_ref))
        else:
            # Otherwise, save as reference
            with open(ref_filepath, 'w') as f:
                json.dump(json_test, f)

        return None

    def compare_ref_values_df(self, df, ref_filepath):
        '''Compare a values dataframe to a reference csv.

        If the reference file does not exist yet, the test data is saved
        as the new reference instead of being checked.

        Parameters
        ----------
        df : pandas DataFrame
            Test dataframe with a number of keys as index paired with values.
        ref_filepath : str
            Reference file path relative to testing directory.

        Returns
        -------
        None

        '''

        # Check keys is index
        assert(df.index.name == 'keys')
        assert(df.columns.to_list() == ['value'])
        # Perform test
        if os.path.exists(ref_filepath):
            # If reference exists, check it, value by value
            df_ref = pd.read_csv(ref_filepath, index_col='keys')
            for key in df.index.values:
                y_test = [df.loc[key, 'value']]
                y_ref = [df_ref.loc[key, 'value']]
                results = self.check_trajectory(y_test, y_ref)
                self.assertTrue(results['Pass'], '{0} Key is {1}.'.format(results['Message'], key))
        else:
            # Otherwise, save as reference
            df.to_csv(ref_filepath)

        return None

    def check_trajectory(self, y_test, y_ref):
        '''Check a numeric trajectory against a reference with a tolerance.

        The error measure at each point is the absolute error plus the
        relative error (the latter only where the reference is not close
        to zero).

        Parameters
        ----------
        y_test : list-like of numerics
            Test trajectory
        y_ref : list-like of numerics
            Reference trajectory

        Returns
        -------
        result : dict
            Dictionary of result of check.
            {'Pass' : bool, True if ErrorMax <= tol, False otherwise.
             'ErrorMax' : float or None, Maximum error, None if fail length check
             'IndexMax' : int or None, Index of maximum error, None if fail length check
             'Message' : str or None, Message if failed check, None if passed.
            }

        '''

        # Set tolerance
        tol = 1e-3
        # Initialize return dictionary
        result = {'Pass': True,
                  'ErrorMax': None,
                  'IndexMax': None,
                  'Message': None}
        # First, check that trajectories are same length
        if len(y_test) != len(y_ref):
            result['Pass'] = False
            result['Message'] = 'Test and reference trajectory not the same length.'
        else:
            # Initialize error arrays
            err_abs = np.zeros(len(y_ref))
            err_rel = np.zeros(len(y_ref))
            err_fun = np.zeros(len(y_ref))
            # Calculate errors
            for i in range(len(y_ref)):
                # Absolute error
                err_abs[i] = np.absolute(y_test[i] - y_ref[i])
                # Relative error, guarded against reference values near zero
                if (abs(y_ref[i]) > 10 * tol):
                    err_rel[i] = err_abs[i] / abs(y_ref[i])
                else:
                    err_rel[i] = 0
                # Total error
                err_fun[i] = err_abs[i] + err_rel[i]
            # Assess error
            err_max = max(err_fun)
            i_max = np.argmax(err_fun)
            if err_max > tol:
                result['Pass'] = False
                # Assign scalars here.  The original code had trailing
                # commas on these two assignments, which accidentally
                # stored one-element tuples instead of the scalar values.
                result['ErrorMax'] = err_max
                result['IndexMax'] = i_max
                result['Message'] = 'Max error ({0}) in trajectory greater than tolerance ({1}) at index {2}. y_test: {3}, y_ref:{4}'.format(err_max, tol, i_max, y_test[i_max], y_ref[i_max])

        return result

    def create_test_points(self, s, n=500):
        '''Create interpolated points to test of a certain number.

        Useful to reduce number of points to test and to avoid failed tests from
        event times being slightly different.

        Parameters
        ----------
        s : pandas Series
            Series containing test points to create, with index as time floats.
        n : int, optional
            Number of points to create
            Default is 500

        Returns
        -------
        s_test : pandas Series
            Series containing interpolated data

        '''

        # Get data
        data = s.to_numpy()
        index = s.index.values
        # Make interpolated index spanning the full original time range
        t_min = index.min()
        t_max = index.max()
        t = np.linspace(t_min, t_max, n)
        # Interpolate data
        data_interp = np.interp(t, index, data)
        # Use at most 8 significant digits to keep references stable
        data_interp = [float('{:.8g}'.format(x)) for x in data_interp]
        # Make Series
        s_test = pd.Series(data=data_interp, index=t)

        return s_test

    def results_to_df(self, points, start_time, final_time, url='http://127.0.0.1:5000'):
        '''Convert results from boptest into pandas DataFrame timeseries.

        Parameters
        ----------
        points: list of str
            List of points to retrieve from boptest api.
        start_time: int
            Starting time of data to get in seconds.
        final_time: int
            Ending time of data to get in seconds.
        url: str
            URL pointing to deployed boptest test case.
            Default is http://127.0.0.1:5000.

        Returns
        -------
        df: pandas DataFrame
            Timeseries dataframe object with "time" as index in seconds.

        '''

        df = pd.DataFrame()
        # Fetch each point separately and join the columns on time.
        for point in points:
            res = requests.put('{0}/results'.format(url), data={'point_name': point, 'start_time': start_time, 'final_time': final_time}).json()
            df = pd.concat((df, pd.DataFrame(data=res[point], index=res['time'], columns=[point])), axis=1)
        df.index.name = 'time'

        return df

    def get_all_points(self, url='localhost:5000'):
        '''Get all of the input and measurement point names from boptest.

        Parameters
        ----------
        url: str, optional
            URL pointing to deployed boptest test case.
            Default is localhost:5000.

        Returns
        -------
        points: list of str
            List of available point names.

        '''

        # Get measurements and inputs
        measurements = requests.get('{0}/measurements'.format(url)).json()
        inputs = requests.get('{0}/inputs'.format(url)).json()
        points = list(measurements.keys()) + list(inputs.keys())

        return points

    def compare_error_code(self, response, message=None):
        '''Assert that a response has status code 400.

        Parameters
        ----------
        response : requests.Response
            Response object to check.
        message : str, optional
            Assertion message.
            Default is None, in which case response.message is used.
            NOTE(review): requests.Response objects have no 'message'
            attribute in current versions of requests; all call sites in
            this file pass an explicit message — confirm before relying
            on this fallback.

        '''

        status_code = response.status_code
        if message is None:
            message = response.message
        self.assertEqual(status_code, 400, message)
class partialTestAPI(partialChecks):
    '''This partial class implements common API tests for test cases.

    References to self attributes for the tests should be set in the setUp
    method of the particular testclass test. They are:

    url : str
        URL to deployed testcase.
    name : str
        Name given to test
    inputs_ref : list of str
        List of names of inputs
    measurements_ref : list of str
        List of names of measurements
    step_ref : numeric
        Default simulation step

    '''

    def _get_override_inputs(self):
        '''Return the input overwrite dictionary for test case self.name.

        Each "*_activate" flag is set to 0, so the paired "*_u" values are
        provided but not actually activated when advancing.

        Returns
        -------
        u : dict
            Input overwrite dictionary.

        Raises
        ------
        Exception
            If self.name is not a known test case.

        '''

        if self.name == 'testcase1':
            u = {'oveAct_activate': 0, 'oveAct_u': 1500}
        elif self.name == 'testcase2':
            u = {'oveTSetRooHea_activate': 0, 'oveTSetRooHea_u': 273.15 + 22}
        elif self.name == 'testcase3':
            u = {'oveActNor_activate': 0, 'oveActNor_u': 1500,
                 'oveActSou_activate': 0, 'oveActSou_u': 1500}
        elif self.name == 'bestest_air':
            u = {'fcu_oveTSup_activate': 0, 'fcu_oveTSup_u': 290}
        elif self.name == 'bestest_hydronic':
            u = {'oveTSetSup_activate': 0,
                 'oveTSetSup_u': 273.15 + 60,
                 'ovePum_activate': 0,
                 'ovePum_u': 1}
        elif self.name == 'bestest_hydronic_heat_pump':
            u = {'oveTSet_activate': 0, 'oveTSet_u': 273.15 + 22}
        elif self.name == 'multizone_residential_hydronic':
            u = {'conHeaRo1_oveTSetHea_activate': 0, 'conHeaRo1_oveTSetHea_u': 273.15 + 22,
                 'oveEmiPum_activate': 0, 'oveEmiPum_u': 1}
        elif self.name == 'singlezone_commercial_hydronic':
            u = {'oveTSupSet_activate': 0, 'oveTSupSet_u': 273.15 + 25,
                 'oveTZonSet_activate': 0, 'oveTZonSet_u': 273.15 + 25}
        elif self.name == 'multizone_office_simple_air':
            u = {'hvac_oveAhu_TSupSet_activate': 0, 'hvac_oveAhu_TSupSet_u': 273.15 + 22}
        else:
            raise Exception('Need to specify u for this test case')

        return u

    def _get_measurement_point(self):
        '''Return the measurement point name used for partial-results tests.

        Returns
        -------
        str
            Measurement point name for test case self.name.

        Raises
        ------
        KeyError
            If self.name has no registered measurement point.

        '''

        measurement_list = {'testcase1': 'PHea_y',
                            'testcase2': 'PFan_y',
                            'testcase3': 'CO2RooAirSou_y',
                            'bestest_hydronic': 'reaQHea_y',
                            'bestest_air': 'zon_weaSta_reaWeaSolHouAng_y',
                            'bestest_hydronic_heat_pump': 'weaSta_reaWeaPAtm_y',
                            'multizone_residential_hydronic': 'weatherStation_reaWeaWinSpe_y',
                            'singlezone_commercial_hydronic': 'ahu_reaTRetAir_y',
                            'multizone_office_simple_air': 'hvac_reaAhu_PPumHea_y'}

        return measurement_list[self.name]

    def _check_partial_results(self, start_time, final_time, ref_name):
        '''Advance one step and compare a partial results window to a reference.

        Parameters
        ----------
        start_time : numeric
            Start time of the results window in seconds.
        final_time : numeric
            Final time of the results window in seconds.
        ref_name : str
            Base name (without extension) of the reference csv file.

        Returns
        -------
        None

        '''

        requests.put('{0}/initialize'.format(self.url), data={'start_time': 0, 'warmup_period': 0})
        requests.put('{0}/step'.format(self.url), data={'step': self.step_ref})
        measurements = requests.get('{0}/measurements'.format(self.url)).json()
        requests.post('{0}/advance'.format(self.url), data=dict()).json()
        point = self._get_measurement_point()
        if point not in measurements:
            raise KeyError('Point {0} not in measurements list.'.format(point))
        res = requests.put('{0}/results'.format(self.url), data={'point_name': point,
                                                                 'start_time': start_time,
                                                                 'final_time': final_time}).json()
        df = pd.DataFrame.from_dict(res).set_index('time')
        ref_filepath = os.path.join(get_root_path(), 'testing', 'references', self.name, '{0}.csv'.format(ref_name))
        self.compare_ref_timeseries_df(df, ref_filepath)

        return None

    def test_get_version(self):
        '''Test getting the version of BOPTEST.

        '''

        # Get version from BOPTEST API
        version = requests.get('{0}/version'.format(self.url)).json()
        # Match either a numeric version (e.g. 1.2.3) or the development
        # placeholder '0.x.x'.  Raw strings with escaped dots fix the
        # original patterns, where '\d.\d.\d' let '.' match any character
        # and '\d' outside a raw string is a deprecated escape sequence.
        r_num = re.compile(r'\d+\.\d+\.\d+')
        r_x = re.compile(r'0\.x\.x')
        # Test that the returned version matches the expected string format
        if r_num.match(version['version']) or r_x.match(version['version']):
            self.assertTrue(True)
        else:
            self.assertTrue(False, '/version did not return correctly. Returned {0}.'.format(version))

    def test_get_name(self):
        '''Test getting the name of test.

        '''

        name = requests.get('{0}/name'.format(self.url)).json()
        self.assertEqual(name['name'], self.name)

    def test_get_inputs(self):
        '''Test getting the input list of tests.

        '''

        inputs = requests.get('{0}/inputs'.format(self.url)).json()
        ref_filepath = os.path.join(get_root_path(), 'testing', 'references', self.name, 'get_inputs.json')
        self.compare_ref_json(inputs, ref_filepath)

    def test_get_measurements(self):
        '''Test getting the measurement list of test.

        '''

        measurements = requests.get('{0}/measurements'.format(self.url)).json()
        ref_filepath = os.path.join(get_root_path(), 'testing', 'references', self.name, 'get_measurements.json')
        self.compare_ref_json(measurements, ref_filepath)

    def test_get_step(self):
        '''Test getting the communication step of test.

        '''

        step = requests.get('{0}/step'.format(self.url)).json()
        df = pd.DataFrame(data=[step], index=['step'], columns=['value'])
        df.index.name = 'keys'
        ref_filepath = os.path.join(get_root_path(), 'testing', 'references', self.name, 'get_step.csv')
        self.compare_ref_values_df(df, ref_filepath)

    def test_set_step(self):
        '''Test setting the communication step of test.

        '''

        # Remember the current step so it can be restored afterwards.
        step_current = requests.get('{0}/step'.format(self.url)).json()
        step = 101
        requests.put('{0}/step'.format(self.url), data={'step': step})
        step_set = requests.get('{0}/step'.format(self.url)).json()
        self.assertEqual(step, step_set)
        # Restore original step
        requests.put('{0}/step'.format(self.url), data={'step': step_current})

    def test_initialize(self):
        '''Test initialization of test simulation.

        '''

        # Get measurements and inputs
        points = self.get_all_points(self.url)
        # Get current step so it can be restored at the end
        step = requests.get('{0}/step'.format(self.url)).json()
        # Initialize with a half-day start time and half-day warmup
        start_time = int(0.5*24*3600)
        y = requests.put('{0}/initialize'.format(self.url), data={'start_time': start_time, 'warmup_period': int(0.5*24*3600)}).json()
        # Check that initialize returns the right initial values and results
        df = pd.DataFrame.from_dict(y, orient='index', columns=['value'])
        df.index.name = 'keys'
        ref_filepath = os.path.join(get_root_path(), 'testing', 'references', self.name, 'initial_values.csv')
        self.compare_ref_values_df(df, ref_filepath)
        # Check trajectories over the warmup period
        df = self.results_to_df(points, 0, start_time, self.url)
        ref_filepath = os.path.join(get_root_path(), 'testing', 'references', self.name, 'results_initialize_initial.csv')
        self.compare_ref_timeseries_df(df, ref_filepath)
        # Check kpis
        res_kpi = requests.get('{0}/kpi'.format(self.url)).json()
        df = pd.DataFrame.from_dict(res_kpi, orient='index', columns=['value'])
        df.index.name = 'keys'
        ref_filepath = os.path.join(get_root_path(), 'testing', 'references', self.name, 'kpis_initialize_initial.csv')
        self.compare_ref_values_df(df, ref_filepath)
        # Advance one day
        step_advance = 1*24*3600
        requests.put('{0}/step'.format(self.url), data={'step': step_advance})
        requests.post('{0}/advance'.format(self.url), data={}).json()
        # Check trajectories after the advance
        df = self.results_to_df(points, start_time, start_time+step_advance, self.url)
        ref_filepath = os.path.join(get_root_path(), 'testing', 'references', self.name, 'results_initialize_advance.csv')
        self.compare_ref_timeseries_df(df, ref_filepath)
        # Check kpis
        res_kpi = requests.get('{0}/kpi'.format(self.url)).json()
        df = pd.DataFrame.from_dict(res_kpi, orient='index', columns=['value'])
        df.index.name = 'keys'
        ref_filepath = os.path.join(get_root_path(), 'testing', 'references', self.name, 'kpis_initialize_advance.csv')
        self.compare_ref_values_df(df, ref_filepath)
        # Set step back to step
        requests.put('{0}/step'.format(self.url), data={'step': step})

    def test_advance_no_data(self):
        '''Test advancing of simulation with no input data.

        This is a basic test of functionality.
        Tests for advancing with overwriting are done in the example tests.

        '''

        requests.put('{0}/initialize'.format(self.url), data={'start_time': 0, 'warmup_period': 0})
        requests.put('{0}/step'.format(self.url), data={'step': self.step_ref})
        y = requests.post('{0}/advance'.format(self.url), data=dict()).json()
        df = pd.DataFrame.from_dict(y, orient='index', columns=['value'])
        df.index.name = 'keys'
        ref_filepath = os.path.join(get_root_path(), 'testing', 'references', self.name, 'advance_no_data.csv')
        self.compare_ref_values_df(df, ref_filepath)

    def test_advance_false_overwrite(self):
        '''Test advancing of simulation with overwriting as false.

        This is a basic test of functionality.
        Tests for advancing with overwriting are done in the example tests.

        '''

        # All activate flags are 0, so the u values must not be applied.
        u = self._get_override_inputs()
        requests.put('{0}/initialize'.format(self.url), data={'start_time': 0, 'warmup_period': 0})
        requests.put('{0}/step'.format(self.url), data={'step': self.step_ref})
        y = requests.post('{0}/advance'.format(self.url), data=u).json()
        df = pd.DataFrame.from_dict(y, orient='index', columns=['value'])
        df.index.name = 'keys'
        ref_filepath = os.path.join(get_root_path(), 'testing', 'references', self.name, 'advance_false_overwrite.csv')
        self.compare_ref_values_df(df, ref_filepath)

    def test_get_forecast_default(self):
        '''Check that the forecaster is able to retrieve the data.

        Default forecast parameters for testcase used.

        '''

        # Initialize
        requests.put('{0}/initialize'.format(self.url), data={'start_time': 0, 'warmup_period': 0})
        # Test case forecast
        forecast = requests.get('{0}/forecast'.format(self.url)).json()
        df_forecaster = pd.DataFrame(forecast).set_index('time')
        # Check the forecast
        ref_filepath = os.path.join(get_root_path(), 'testing', 'references', self.name, 'get_forecast_default.csv')
        self.compare_ref_timeseries_df(df_forecaster, ref_filepath)

    def test_put_and_get_parameters(self):
        '''Check PUT and GET of forecast settings.

        '''

        # Define forecast parameters
        forecast_parameters_ref = {'horizon': 3600, 'interval': 300}
        # Set forecast parameters
        ret = requests.put('{0}/forecast_parameters'.format(self.url),
                           data=forecast_parameters_ref).json()
        # Get forecast parameters
        forecast_parameters = requests.get('{0}/forecast_parameters'.format(self.url)).json()
        # Check the forecast parameters
        self.assertDictEqual(forecast_parameters, forecast_parameters_ref)
        # Check the return on the put request
        self.assertDictEqual(ret, forecast_parameters_ref)

    def test_get_forecast_with_parameters(self):
        '''Check that the forecaster is able to retrieve the data.

        Custom forecast parameters used.

        '''

        # Define forecast parameters
        forecast_parameters_ref = {'horizon': 3600, 'interval': 300}
        # Initialize
        requests.put('{0}/initialize'.format(self.url), data={'start_time': 0, 'warmup_period': 0})
        # Set forecast parameters
        requests.put('{0}/forecast_parameters'.format(self.url),
                     data=forecast_parameters_ref)
        # Test case forecast
        forecast = requests.get('{0}/forecast'.format(self.url)).json()
        df_forecaster = pd.DataFrame(forecast).set_index('time')
        # Check the forecast
        ref_filepath = os.path.join(get_root_path(), 'testing', 'references', self.name, 'get_forecast_with_parameters.csv')
        self.compare_ref_timeseries_df(df_forecaster, ref_filepath)

    def test_set_get_scenario(self):
        '''Test setting and getting the scenario of test.

        '''

        # Remember current scenario so it can be restored afterwards
        scenario_current = requests.get('{0}/scenario'.format(self.url)).json()
        scenario = {'electricity_price': 'highly_dynamic',
                    'time_period': self.test_time_period}
        requests.put('{0}/scenario'.format(self.url), data=scenario)
        scenario_set = requests.get('{0}/scenario'.format(self.url)).json()
        self.assertEqual(scenario, scenario_set)
        # Check initialized correctly
        points = self.get_all_points(self.url)
        # Don't check weather points
        points_check = []
        for key in points:
            if 'weaSta' not in key:
                points_check.append(key)
        df = self.results_to_df(points_check, -np.inf, np.inf, self.url)
        ref_filepath = os.path.join(get_root_path(), 'testing', 'references', self.name, 'results_set_scenario.csv')
        self.compare_ref_timeseries_df(df, ref_filepath)
        # Return scenario to original
        requests.put('{0}/scenario'.format(self.url), data=scenario_current)

    def test_partial_results_inner(self):
        '''Test getting results for start time after and final time before.

        '''

        self._check_partial_results(self.step_ref*0.25, self.step_ref*0.75, 'partial_results_inner')

    def test_partial_results_outer(self):
        '''Test getting results for start time before and final time after.

        '''

        self._check_partial_results(0-self.step_ref, self.step_ref*2, 'partial_results_outer')

    def test_invalid_step(self):
        '''Test set step with invalid (non-numeric) value returns a 400 error.

        '''

        scenario = {'time_period': 'test_day'}
        requests.put('{0}/scenario'.format(self.url), data=scenario)
        # Try setting a non-numeric step
        step = "5*7*24*3600"
        payload = requests.put('{0}/step'.format(self.url), data={'step': step})
        self.compare_error_code(payload, "Invalid step did not return 400 message.")

    def test_invalid_forecast_parameters(self):
        '''Check that the setting forecast parameter with invalid start or horizon returns 400 error.

        '''

        # Non-numeric horizon
        forecast_parameters_ref = {'horizon': 'foo', 'interval': 300}
        # Initialize
        requests.put('{0}/initialize'.format(self.url), data={'start_time': 0, 'warmup_period': 0})
        payload = requests.put('{0}/forecast_parameters'.format(self.url),
                               data=forecast_parameters_ref)
        self.compare_error_code(payload, "Invalid forecast_parameters request did not return 400 message.")
        # Non-numeric interval
        forecast_parameters_ref = {'horizon': 3600, 'interval': 'bar'}
        payload = requests.put('{0}/forecast_parameters'.format(self.url),
                               data=forecast_parameters_ref)
        self.compare_error_code(payload, "Invalid forecast_parameters request did not return 400 message.")

    def test_invalid_scenario(self):
        '''Test setting scenario with invalid identifier.

        '''

        # Get current scenario (NOTE(review): not restored afterwards,
        # but the invalid PUT below should not change it).
        scenario_current = requests.get('{0}/scenario'.format(self.url)).json()
        scenario = {'electricity_price': 'invalid_scnario', 'time_period': self.test_time_period}
        payload = requests.put('{0}/scenario'.format(self.url), data=scenario)
        self.compare_error_code(payload, "Invalid set scenario request did not return 400 message.")

    def test_invalid_initialize(self):
        '''Test initialization of test simulation with invalid start_time returns 400 error.

        '''

        points = self.get_all_points(self.url)
        # Get current step
        step = requests.get('{0}/step'.format(self.url)).json()
        # Initialize with a non-numeric start time
        start_time = "0.5 * 24 * 3600"
        y = requests.put('{0}/initialize'.format(self.url),
                         data={'start_time': start_time, 'warmup_period': int(0.5 * 24 * 3600)})
        self.compare_error_code(y, "Invalid initialize request did not return 400 message.")

    def test_invalid_advance(self):
        '''Test advancing of simulation with invalid input data type (non-numerical) will return 400 error.

        This is a basic test of functionality.

        '''

        # Take the valid override inputs and make every value non-numeric.
        u = {key: "invalid" for key in self._get_override_inputs()}
        requests.put('{0}/initialize'.format(self.url), data={'start_time': 0, 'warmup_period': 0})
        requests.put('{0}/step'.format(self.url), data={'step': self.step_ref})
        y = requests.post('{0}/advance'.format(self.url), data=u)
        self.compare_error_code(y, "Invalid advance request did not return 400 message.")
class partialTestTimePeriod(partialChecks):
    '''Partial class for testing the time periods for each test case

    '''

    def run_time_period(self, time_period):
        '''Runs the example and tests the kpi and trajectory results for time period.

        Parameters
        ----------
        time_period: str
            Name of test_period to run

        Returns
        -------
        None

        '''

        base_url = self.url
        # Set time period scenario
        requests.put('{0}/scenario'.format(base_url), data={'time_period': time_period})
        # Advance repeatedly; the API signals the end of the time period by
        # returning an empty (falsy) payload.
        while requests.post('{0}/advance'.format(base_url), data={}).json():
            pass
        # Compare the full result trajectories against the reference
        df = self.results_to_df(self.points_check, -np.inf, np.inf, base_url)
        ref_filepath = os.path.join(get_root_path(), 'testing', 'references', self.name, 'results_{0}.csv'.format(time_period))
        self.compare_ref_timeseries_df(df, ref_filepath)
        # Check the kpis under every electricity price scenario
        for price_scenario in ['constant', 'dynamic', 'highly_dynamic']:
            requests.put('{0}/scenario'.format(base_url), data={'electricity_price': price_scenario})
            res_kpi = requests.get('{0}/kpi'.format(base_url)).json()
            df = pd.DataFrame.from_dict(res_kpi, orient='index', columns=['value'])
            df.index.name = 'keys'
            ref_filepath = os.path.join(get_root_path(), 'testing', 'references', self.name, 'kpis_{0}_{1}.csv'.format(time_period, price_scenario))
            self.compare_ref_values_df(df, ref_filepath)
        # Leave the electricity price scenario back at constant
        requests.put('{0}/scenario'.format(base_url), data={'electricity_price': 'constant'})
class partialTestSeason(partialChecks):
    '''Partial class for testing the time periods for each test case

    '''

    def run_season(self, season):
        '''Runs the example and tests the kpi and trajectory results for a season.

        Parameters
        ----------
        season: str
            Name of season to run.
            'winter' or 'summer' or 'shoulder'

        Returns
        -------
        None

        '''

        # Start times (seconds from beginning of year) for each season
        season_starts = {'winter': 1*24*3600,
                         'summer': 248*24*3600,
                         'shoulder': 118*24*3600}
        if season not in season_starts:
            raise ValueError('Season {0} unknown.'.format(season))
        start_time = season_starts[season]
        # Simulate two days
        length = 48*3600
        # Initialize test case
        requests.put('{0}/initialize'.format(self.url), data={'start_time': start_time, 'warmup_period': 0})
        # Get default simulation step
        step_def = requests.get('{0}/step'.format(self.url)).json()
        # Advance through the whole test length at the default step
        for _ in range(int(length / step_def)):
            requests.post('{0}/advance'.format(self.url), data={}).json()
        requests.put('{0}/scenario'.format(self.url), data={'electricity_price': 'constant'})
        # Compare all point trajectories against the reference
        point_names = self.get_all_points(self.url)
        df = self.results_to_df(point_names, start_time, start_time + length, self.url)
        ref_filepath = os.path.join(get_root_path(), 'testing', 'references', self.name, 'results_{0}.csv'.format(season))
        self.compare_ref_timeseries_df(df, ref_filepath)
        # Check the kpis under every electricity price scenario
        for price_scenario in ['constant', 'dynamic', 'highly_dynamic']:
            requests.put('{0}/scenario'.format(self.url), data={'electricity_price': price_scenario})
            res_kpi = requests.get('{0}/kpi'.format(self.url)).json()
            df = pd.DataFrame.from_dict(res_kpi, orient='index', columns=['value'])
            df.index.name = 'keys'
            ref_filepath = os.path.join(get_root_path(), 'testing', 'references', self.name, 'kpis_{0}_{1}.csv'.format(season, price_scenario))
            self.compare_ref_values_df(df, ref_filepath)