Commit 2580cfa

Fix upload es (redhat-performance#377)

* fix es index
* fix upload to es parse error
* fix template metrics

1 parent 68492cf

4 files changed: +59 -22 lines

benchmark_runner/common/template_operations/templates/vdbench/vdbench_data_template.yaml (+20 -2)
@@ -10,17 +10,28 @@ template_data:
   BLOCK_SIZES: oltp1,oltp2,oltphw,odss2,odss128,4_cache,64_cache,4,64,4_cache,64_cache,4,64
   IO_OPERATION: oltp1,oltp2,oltphw,odss2,odss128,read,read,read,read,write,write,write,write
   IO_THREADS: 1,1,1,1,1,4,4,2,2,4,4,2,2
+  # How file IO will be done
   FILES_IO: oltp1,oltp2,oltphw,odss2,odss128,random,random,random,random,random,random,random,random
+  # an integer or "max"
   IO_RATE: max,max,max,max,max,max,max,max,max,max,max,max,max
+  # used for mixed workload 0-100
   MIX_PRECENTAGE:
+  # duration time in sec
   DURATION: 600
+  # pause after every test in sec
   PAUSE: 20
+  # warmup before any test in sec
   WARMUP: 20
+  # This parameter allows you to select directories and files for processing either sequential/random
   FILES_SELECTION: random
+  # ratio is 1:X e.g 2 = 50% compressible
   COMPRESSION_RATIO: 2
+  # will it run a fillup before testing starts yes/no
   RUN_FILLUP: "yes"
+  # how many directories to create
   DIRECTORIES: 600
   FILES_PER_DIRECTORY: 10
+  # size in MB
   SIZE_PER_FILE: 10
   limits_cpu: 2
   limits_memory: 4Gi
@@ -33,17 +44,24 @@ template_data:
   IO_THREADS: 1,3
   FILES_IO: random,oltp1
   IO_RATE: max,max
+  # used for mixed workload 0-100
   MIX_PRECENTAGE:
+  # duration time in sec
   DURATION: 20
+  # pause after every test in sec
   PAUSE: 0
+  # warmup before any test in sec
   WARMUP: 0
-  # sequential/random
+  # This parameter allows you to select directories and files for processing either sequential/random
   FILES_SELECTION: random
+  # ratio is 1:X e.g 2 = 50% compressible
   COMPRESSION_RATIO: 2
-  # yes/no
+  # will it run a fillup before testing starts yes/no
   RUN_FILLUP: "no"
+  # how many directories to create
   DIRECTORIES: 300
   FILES_PER_DIRECTORY: 3
+  # size in MB
   SIZE_PER_FILE: 10
   limits_cpu: 2
   limits_memory: 2Gi

benchmark_runner/workloads/workloads.py (+20 -9)
@@ -50,9 +50,11 @@ def vdbench_pod(self, name: str = ''):
             self.__status = self._oc.wait_for_pod_completed(label=f'app=vdbench-{self._trunc_uuid}', label_uuid=False, job=False)
             self.__status = 'complete' if self.__status else 'failed'
             # save run artifacts logs
-            result = self._create_pod_run_artifacts(pod_name=self.__pod_name)
+            result_list = self._create_pod_run_artifacts(pod_name=self.__pod_name)
             if self._es_host:
-                self._upload_to_es(index=self.__es_index, kind=self.__kind, status=self.__status, result=result)
+                # upload several run results
+                for result in result_list:
+                    self._upload_to_es(index=self.__es_index, kind=self.__kind, status=self.__status, result=result)
                 # verify that data upload to elastic search according to unique uuid
                 self._verify_es_data_uploaded(index=self.__es_index, uuid=self._uuid)
             self._oc.delete_pod_sync(
@@ -65,8 +67,12 @@ def vdbench_pod(self, name: str = ''):
             raise err
         except Exception as err:
             # save run artifacts logs
-            result = self._create_pod_run_artifacts(pod_name=self.__pod_name)
-            self._upload_to_es(index=self.__es_index, kind=self.__kind, status='failed', result=result)
+            result_list = self._create_pod_run_artifacts(pod_name=self.__pod_name)
+            # upload several run results
+            for result in result_list:
+                self._upload_to_es(index=self.__es_index, kind=self.__kind, status='failed', result=result)
+            # verify that data upload to elastic search according to unique uuid
+            self._verify_es_data_uploaded(index=self.__es_index, uuid=self._uuid)
             self._oc.delete_pod_sync(
                 yaml=os.path.join(f'{self._run_artifacts_path}', f'{self.vdbench_pod.__name__}.yaml'),
                 pod_name=self.__pod_name)
@@ -91,7 +97,6 @@ def vdbench_vm(self):
                 self.__es_index = 'vdbench-test-ci-results'
             else:
                 self.__es_index = 'vdbench-results'
-
             self.__workload = self.vdbench_vm.__name__.replace('_', '-')
             self.__vm_name = f'{self.__workload}-{self._trunc_uuid}'
             self.__kind = 'vm'
@@ -103,9 +108,11 @@ def vdbench_vm(self):
             self.__status = self._oc.wait_for_vm_log_completed(vm_name=self.__vm_name, end_stamp='@@~@@END-WORKLOAD@@~@@')
             self.__status = 'complete' if self.__status else 'failed'
             # save run artifacts logs
-            result = self._create_vm_run_artifacts(vm_name=self.__vm_name, start_stamp=self.__vm_name, end_stamp='@@~@@END-WORKLOAD@@~@@')
+            result_list = self._create_vm_run_artifacts(vm_name=self.__vm_name, start_stamp=self.__vm_name, end_stamp='@@~@@END-WORKLOAD@@~@@')
             if self._es_host:
-                self._upload_to_es(index=self.__es_index, kind=self.__kind, status=self.__status, result=result)
+                # upload several run results
+                for result in result_list:
+                    self._upload_to_es(index=self.__es_index, kind=self.__kind, status=self.__status, result=result)
                 # verify that data upload to elastic search according to unique uuid
                 self._verify_es_data_uploaded(index=self.__es_index, uuid=self._uuid)
             self._oc.delete_vm_sync(
@@ -118,8 +125,12 @@ def vdbench_vm(self):
             raise err
         except Exception as err:
             # save run artifacts logs
-            result = self._create_vm_run_artifacts(vm_name=self.__vm_name, start_stamp=self.__vm_name, end_stamp='@@~@@END-WORKLOAD@@~@@')
-            self._upload_to_es(index=self.__es_index, kind=self.__kind, status='failed', result=result)
+            result_list = self._create_vm_run_artifacts(vm_name=self.__vm_name, start_stamp=self.__vm_name, end_stamp='@@~@@END-WORKLOAD@@~@@')
+            # upload several run results
+            for result in result_list:
+                self._upload_to_es(index=self.__es_index, kind=self.__kind, status='failed', result=result)
+            # verify that data upload to elastic search according to unique uuid
+            self._verify_es_data_uploaded(index=self.__es_index, uuid=self._uuid)
             self._oc.delete_vm_sync(
                 yaml=os.path.join(f'{self._run_artifacts_path}', f'{self.vdbench_vm.__name__}.yaml'),
                 vm_name=self.__vm_name)
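The core of the ES fix above: the artifact helpers now return a list of per-run dicts, and each dict is uploaded as its own Elasticsearch document instead of one merged payload. A minimal standalone sketch of that pattern, with a stub uploader in place of the real _upload_to_es (names here are illustrative):

from typing import Callable, Dict, List

def upload_run_results(upload_to_es: Callable[..., None], result_list: List[Dict],
                       index: str, kind: str, status: str) -> None:
    # upload several run results, one ES document per run
    for result in result_list:
        upload_to_es(index=index, kind=kind, status=status, result=result)

# Usage with a stub uploader that just prints each document:
results = [{'Run': 'oltp1', 'Rate': 10.5}, {'Run': 'oltp2', 'Rate': 9.8}]
upload_run_results(lambda **kwargs: print(kwargs), results,
                   index='vdbench-results', kind='pod', status='complete')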

benchmark_runner/workloads/workloads_operations.py (+18 -10)
@@ -177,18 +177,22 @@ def _create_pod_run_artifacts(self, pod_name: str):
         :param pod_name: pod name
         :return: run results dict
         """
-        result_dict = {'pod': pod_name}
+        result_list = []
         pod_log_file = self.__create_pod_log(pod=pod_name)
         workload_name = self._environment_variables_dict.get('workload', '').replace('_', '-')
         # csv to dictionary
         the_reader = DictReader(open(pod_log_file, 'r'))
         for line_dict in the_reader:
             for key, value in line_dict.items():
                 if self.__is_float(value):
-                    line_dict[key] = float(value)
-            result_dict[line_dict['Run']] = line_dict
-        result_dict['run_artifacts_url'] = os.path.join(self._run_artifacts_url, f'{self.__get_run_artifacts_hierarchy(workload_name=workload_name, is_file=True)}-{self._time_stamp_format}.tar.gz')
-        return result_dict
+                    num = float(value)
+                    line_dict[key] = round(num, 3)
+                elif value == 'n/a':
+                    line_dict[key] = 0.0
+            line_dict['pod_name'] = pod_name
+            line_dict['run_artifacts_url'] = os.path.join(self._run_artifacts_url, f'{self.__get_run_artifacts_hierarchy(workload_name=workload_name, is_file=True)}-{self._time_stamp_format}.tar.gz')
+            result_list.append(dict(line_dict))
+        return result_list

     def _create_vm_run_artifacts(self, vm_name: str, start_stamp: str, end_stamp: str):
         """
@@ -198,7 +202,7 @@ def _create_vm_run_artifacts(self, vm_name: str, start_stamp: str, end_stamp: str):
         :param end_stamp: end stamp
         :return: run results dict
         """
-        result_dict = {'vm': vm_name}
+        result_list = []
         results_list = self._oc.extract_vm_results(vm_name=vm_name, start_stamp=start_stamp, end_stamp=end_stamp)
         workload_name = self._environment_variables_dict.get('workload', '').replace('_', '-')
         # insert results to csv
@@ -211,10 +215,14 @@ def _create_vm_run_artifacts(self, vm_name: str, start_stamp: str, end_stamp: str):
         for line_dict in the_reader:
             for key, value in line_dict.items():
                 if self.__is_float(value):
-                    line_dict[key] = float(value)
-            result_dict[line_dict['Run']] = line_dict
-        result_dict['run_artifacts_url'] = os.path.join(self._run_artifacts_url, f'{self.__get_run_artifacts_hierarchy(workload_name=workload_name, is_file=True)}-{self._time_stamp_format}.tar.gz')
-        return result_dict
+                    num = float(value)
+                    line_dict[key] = round(num, 3)
+                elif value == 'n/a':
+                    line_dict[key] = 0.0
+            line_dict['vm_name'] = vm_name
+            line_dict['run_artifacts_url'] = os.path.join(self._run_artifacts_url, f'{self.__get_run_artifacts_hierarchy(workload_name=workload_name, is_file=True)}-{self._time_stamp_format}.tar.gz')
+            result_list.append(dict(line_dict))
+        return result_list

     def __make_run_artifacts_tarfile(self, workload: str):
         """

generate_golden_files.sh (+1 -1)
@@ -1,3 +1,3 @@
-python -m venv venv
+python3 -m venv venv
 . venv/bin/activate
 PYTHONPATH=. python3 tests/unittest/benchmark_runner/common/template_operations/generate_golden_files.py
