Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
S2-group
GitHub Repository: S2-group/android-runner
Path: blob/master/examples/plugintest/Plugins/AndroidPlugin.py
908 views
1
import csv
2
import os
3
import os.path as op
4
import threading
5
import time
6
import timeit
7
from collections import OrderedDict
8
9
from .Profiler import Profiler
10
from functools import reduce
11
12
13
class ConfigError(Exception):
    """Raised when the plugin configuration contains an invalid value
    (see AndroidPlugin.is_integer)."""
    pass
15
16
17
class AndroidPlugin(Profiler):
    """Profiler plugin that periodically samples CPU and memory usage of an
    Android device (via ``device.shell`` / ``dumpsys``), writes each run to a
    CSV file, and aggregates the runs into averaged result rows.

    Configuration keys:
        sample_interval: sampling period in milliseconds (non-negative int).
        data_points: list drawn from ['cpu', 'mem'] selecting what to collect.
    """

    def __init__(self, config, paths):
        super(AndroidPlugin, self).__init__(config, paths)
        self.output_dir = ''
        self.paths = paths
        self.profile = False  # True while the sampling loop is running
        available_data_points = ['cpu', 'mem']
        # sample_interval is configured in milliseconds; keep seconds internally.
        self.interval = float(self.is_integer(config.get('sample_interval', 0))) / 1000
        self.data_points = config['data_points']
        # Hoisted: the original rebuilt set(available_data_points) per element.
        valid = set(available_data_points)
        invalid_data_points = [dp for dp in config['data_points'] if dp not in valid]
        if invalid_data_points:
            self.logger.warning('Invalid data points in config: {}'.format(invalid_data_points))
            self.data_points = [dp for dp in config['data_points'] if dp in valid]
        # First row of self.data is the CSV header.
        self.data = [['datetime'] + self.data_points]

    @staticmethod
    def get_cpu_usage(device):
        """Get CPU usage in percentage (string, as reported by dumpsys)."""
        shell_result = device.shell('dumpsys cpuinfo | grep TOTAL')
        shell_splitted = shell_result.split('%')[0]
        # Some devices emit a malformed value such as '12.-5'; drop the stray '-'.
        if '.-' in shell_splitted:
            shell_splitted = shell_splitted.replace('.-', '.')
        return shell_splitted

    @staticmethod
    def get_mem_usage(device, app):
        """Get memory usage in KB for app; if app is falsy, system usage is used."""
        if not app:
            # Strip the '(', 'kB', ',' and 'K' decorations in one translate() pass.
            return device.shell('dumpsys meminfo | grep Used').translate(
                str.maketrans('', '', '(kB,K')).split()[2]
        else:
            result = device.shell('dumpsys meminfo {} | grep TOTAL'.format(app))
            if result == '':
                # grep matched nothing; rerun without it so the error text is visible.
                result = device.shell('dumpsys meminfo {}'.format(app))
                if 'No process found' in result:
                    raise Exception('Android Profiler: {}'.format(result))
            return ' '.join(result.strip().split()).split()[1]

    def start_profiling(self, device, **kwargs):
        """Begin sampling; kwargs may carry 'app' to profile a single package."""
        self.profile = True
        app = kwargs.get('app', None)
        self.get_data(device, app)

    def get_data(self, device, app):
        """Collect one sample, then re-schedule itself on a threading.Timer
        every self.interval seconds until stop_profiling() clears self.profile."""
        start = timeit.default_timer()
        device_time = device.shell('date -u')
        row = [device_time]
        if 'cpu' in self.data_points:
            row.append(self.get_cpu_usage(device))
        if 'mem' in self.data_points:
            row.append(self.get_mem_usage(device, app))
        self.data.append(row)
        end = timeit.default_timer()
        # Subtract the (fractional) time spent sampling so the effective period
        # stays close to self.interval. The previous int(end - start) truncated
        # sub-second overhead to 0, so the period silently drifted upward.
        elapsed = max(0.0, end - start)  # timer results could be negative
        interval = max(0.0, self.interval - elapsed)
        if self.profile:
            threading.Timer(interval, self.get_data, args=(device, app)).start()

    def stop_profiling(self, device, **kwargs):
        """Stop the sampling loop (an already-scheduled sample may still fire once)."""
        self.profile = False

    def collect_results(self, device):
        """Dump the collected samples to <output_dir>/<device.id>_<timestamp>.csv."""
        filename = '{}_{}.csv'.format(device.id, time.strftime('%Y.%m.%d_%H%M%S'))
        # newline='' is the documented way to open files for the csv module
        # (prevents blank rows on Windows); plain 'w' replaces the pointless 'w+'.
        with open(op.join(self.output_dir, filename), 'w', newline='') as f:
            writer = csv.writer(f)
            writer.writerows(self.data)

    def set_output(self, output_dir):
        """Remember the directory that collect_results/aggregate_subject write to."""
        self.output_dir = output_dir

    def dependencies(self):
        """No Android packages need to be installed for this profiler."""
        return []

    def load(self, device):
        """Nothing to set up on the device."""
        return

    def unload(self, device):
        """Nothing to tear down on the device."""
        return

    def aggregate_subject(self):
        """Average the per-run CSVs in output_dir into output_dir/Aggregated.csv."""
        filename = os.path.join(self.output_dir, 'Aggregated.csv')
        subject_rows = [self.aggregate_android_subject(self.output_dir)]
        self.write_to_file(filename, subject_rows)

    def aggregate_end(self, data_dir, output_file):
        """Aggregate every device/subject(/browser) tree under data_dir into one CSV."""
        rows = self.aggregate_final(data_dir)
        self.write_to_file(output_file, rows)

    @staticmethod
    def aggregate_android_subject(logs_dir):
        """Average every numeric column over all run CSVs found in logs_dir.

        Returns an OrderedDict keyed 'android_<column>', sorted by key.
        NOTE(review): a run file with no data rows leaves count == 0 and
        raises ZeroDivisionError — confirm runs always contain samples.
        """
        def add_row(accum, new):
            # Fold one CSV row into the running per-column totals.
            row = {k: v + float(new[k]) for k, v in list(accum.items()) if k not in ['Component', 'count']}
            count = accum['count'] + 1
            return dict(row, **{'count': count})

        runs = []
        for run_file in [f for f in os.listdir(logs_dir) if os.path.isfile(os.path.join(logs_dir, f))]:
            with open(os.path.join(logs_dir, run_file), 'r') as run:
                reader = csv.DictReader(run)
                # Zero-initialised totals for every column except the timestamp.
                init = dict({fn: 0 for fn in reader.fieldnames if fn != 'datetime'}, **{'count': 0})
                run_total = reduce(add_row, reader, init)
                # Per-run mean of each column.
                runs.append({k: v / run_total['count'] for k, v in list(run_total.items()) if k != 'count'})
        # Sum the per-run means, then divide by the number of runs below.
        runs_total = reduce(lambda x, y: {k: v + y[k] for k, v in list(x.items())}, runs)
        return OrderedDict(
            sorted(list({'android_' + k: v / len(runs) for k, v in list(runs_total.items())}.items()),
                   key=lambda x: x[0]))

    def aggregate_final(self, data_dir):
        """Build one aggregated row per device/subject(/browser) combination
        found under data_dir (browser level is optional in the directory tree)."""
        rows = []
        for device in self.list_subdir(data_dir):
            row = OrderedDict({'device': device})
            device_dir = os.path.join(data_dir, device)
            for subject in self.list_subdir(device_dir):
                row.update({'subject': subject})
                subject_dir = os.path.join(device_dir, subject)
                if os.path.isdir(os.path.join(subject_dir, 'AndroidPlugin')):
                    row.update(self.aggregate_android_final(os.path.join(subject_dir, 'AndroidPlugin')))
                    rows.append(row.copy())
                else:
                    for browser in self.list_subdir(subject_dir):
                        row.update({'browser': browser})
                        browser_dir = os.path.join(subject_dir, browser)
                        if os.path.isdir(os.path.join(browser_dir, 'AndroidPlugin')):
                            row.update(self.aggregate_android_final(os.path.join(browser_dir, 'AndroidPlugin')))
                            rows.append(row.copy())
        return rows

    @staticmethod
    def aggregate_android_final(logs_dir):
        """Read logs_dir/Aggregated.csv and return its last data row as an OrderedDict.

        NOTE(review): returns None when Aggregated.csv is absent, which makes
        callers' row.update() raise TypeError — confirm the file always exists
        by the time this is called.
        """
        for aggregated_file in [f for f in os.listdir(logs_dir) if os.path.isfile(os.path.join(logs_dir, f))]:
            if aggregated_file == "Aggregated.csv":
                with open(os.path.join(logs_dir, aggregated_file), 'r') as aggregated:
                    reader = csv.DictReader(aggregated)
                    row_dict = OrderedDict()
                    # Later rows overwrite earlier ones, so this keeps the last row.
                    for row in reader:
                        for f in reader.fieldnames:
                            row_dict.update({f: row[f]})
                    return OrderedDict(row_dict)

    @staticmethod
    def list_subdir(a_dir):
        """List immediate subdirectories of a_dir"""
        # https://stackoverflow.com/a/800201
        return [name for name in os.listdir(a_dir)
                if os.path.isdir(os.path.join(a_dir, name))]

    @staticmethod
    def write_to_file(filename, rows):
        """Write a list of equally-keyed dicts to filename as CSV (keys of the
        first row become the header)."""
        # newline='' per the csv module docs (prevents blank lines on Windows).
        with open(filename, 'w', newline='') as f:
            writer = csv.DictWriter(f, list(rows[0].keys()))
            writer.writeheader()
            writer.writerows(rows)

    @staticmethod
    def is_integer(number, minimum=0):
        """Validate that number is an int >= minimum and return it unchanged.

        Raises ConfigError otherwise. (The name is historical: this validates
        rather than merely tests.)
        """
        if not isinstance(number, int):
            raise ConfigError('%s is not an integer' % number)
        if number < minimum:
            raise ConfigError('%s should be equal or larger than %i' % (number, minimum))
        return number
182
183