Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
S2-group
GitHub Repository: S2-group/android-runner
Path: blob/master/AndroidRunner/Plugins/monsoon/Monsoon.py
907 views
1
import csv
2
import os
3
import os.path as op
4
import time
5
from collections import OrderedDict
6
7
from AndroidRunner import util
8
from AndroidRunner.Plugins.Profiler import Profiler
9
from AndroidRunner.Plugins.monsoon.script.power_device import power_meter
10
11
class Monsoon(Profiler):
    """Profiler plugin that records energy measurements taken by a Monsoon
    power meter (via the bundled ``power_device.power_meter`` script).

    Each run produces one ``monsoon_<timestamp>.csv`` file with a single data
    row; per-subject and experiment-wide aggregation stitch those rows back
    together.
    """

    def __init__(self, config, paths):
        super(Monsoon, self).__init__(config, paths)
        self.output_dir = ''
        self.paths = paths
        self.profile = False
        # Initialized here so collect_results() cannot raise AttributeError
        # if it is ever invoked before stop_profiling() has populated it.
        self.results = []
        # Column headers shared by the per-run files and the aggregation files.
        self.data_points = ["energy_joules", "duration_ms", "error_flag"]

    def dependencies(self):
        """No Android packages need to be installed for this profiler."""
        return []

    def load(self, device):
        """Nothing to load onto the device."""
        return

    def start_profiling(self, device, **kwargs):
        """Start the profiling process"""

        # Quickly let the mobile device sleep and wake up so a run can take up to 30 minutes.
        device.shell("input keyevent KEYCODE_SLEEP")
        device.shell("input keyevent KEYCODE_WAKEUP")
        time.sleep(5)
        self.profile = True
        power_meter.start()

    def stop_profiling(self, device, **kwargs):
        """Stop the profiling process"""
        # power_meter.stop() is expected to yield (energy, duration_s, error)
        # based on how the triple is consumed in collect_results() below.
        self.results = power_meter.stop()
        self.profile = False

        # Quickly let the mobile device sleep and wake up so the device is awake for 30 minutes.
        # This solves the issue of certain commands sent to the device blocking the execution of the program.
        device.shell("input keyevent KEYCODE_SLEEP")
        device.shell("input keyevent KEYCODE_WAKEUP")
        time.sleep(5)

    def collect_results(self, device):
        """Collect the data and clean up extra files on the device, save data in location set by 'set_output' """
        filename = 'monsoon_{}.csv'.format(time.strftime('%Y.%m.%d_%H%M%S'))
        # newline='' is required by the csv module so csv.writer does not
        # produce blank interleaved rows on Windows.
        with open(op.join(self.output_dir, filename), 'w', newline='') as f:
            writer = csv.writer(f)
            writer.writerow(self.data_points)
            # results = (energy_joules, duration_seconds, error_flag);
            # duration is converted from seconds to milliseconds.
            writer.writerow([self.results[0], round(self.results[1] * 1000), self.results[2]])

    def unload(self, device):
        """Nothing to remove from the device."""
        return

    def set_output(self, output_dir):
        """Set the output directory before the start_profiling is called"""
        self.output_dir = output_dir

    def aggregate_subject(self):
        """Aggregate the data at the end of a subject, collect data and save data to location set by 'set output' """
        with open(op.join(self.output_dir, 'aggregated.csv'), 'w', newline='') as output:
            writer = csv.writer(output)
            writer.writerow(self.data_points)

            # Loop over files containing the measurements for each run in
            # ascending order (oldest run first, newest run last) — the
            # timestamped filenames sort chronologically.
            for output_file in sorted(os.listdir(self.output_dir)):
                if output_file.startswith("monsoon_"):
                    # Read via a context manager so the handle is closed
                    # promptly instead of being leaked for the GC to collect.
                    with open(op.join(self.output_dir, output_file)) as run_file:
                        run_lines = run_file.readlines()
                    # Line 0 is the header; line 1 is the single data row.
                    res = run_lines[1].rstrip().split(",")
                    writer.writerow([res[0], res[1], res[2]])

    def aggregate_end(self, data_dir, output_file):
        """Aggregate the data at the end of the experiment.
        Data located in file structure inside data_dir. Save aggregated data to output_file
        """
        rows = self.aggregate_final(data_dir)
        util.write_to_file(output_file, rows)

    def aggregate_final(self, data_dir):
        """Compiles subject aggregation files.

        Handles both directory layouts: device/subject/monsoon (native apps)
        and device/subject/browser/monsoon (web experiments).
        Returns a list of OrderedDict rows, one per run.
        """
        rows = []
        for device in util.list_subdir(data_dir):
            row = OrderedDict({'device': device})
            device_dir = os.path.join(data_dir, device)
            for subject in util.list_subdir(device_dir):
                row.update({'subject': subject})
                subject_dir = os.path.join(device_dir, subject)
                if os.path.isdir(os.path.join(subject_dir, 'monsoon')):
                    # Native-app layout: monsoon data directly under the subject.
                    temp_row = row.copy()
                    row.update(self.get_aggregated_runs_subject(
                        os.path.join(subject_dir, 'monsoon')))
                    self.add_rows(row, temp_row, rows, subject_dir)

                else:
                    # Web layout: one extra 'browser' level before the data.
                    for browser in util.list_subdir(subject_dir):
                        row.update({'browser': browser})
                        browser_dir = os.path.join(subject_dir, browser)
                        if os.path.isdir(os.path.join(browser_dir, 'monsoon')):
                            temp_row = row.copy()
                            row.update(self.get_aggregated_runs_subject(
                                os.path.join(browser_dir, 'monsoon')))
                            self.add_rows(row, temp_row, rows, browser_dir)
        return rows

    @staticmethod
    def get_aggregated_runs_subject(logs_dir):
        """Finds the aggregated file for a subject and returns the rows of that file. The data returned is a key-value pair where the value is a list"""
        for aggregated_file in [f for f in os.listdir(logs_dir) if os.path.isfile(os.path.join(logs_dir, f))]:
            if aggregated_file == "aggregated.csv":
                with open(os.path.join(logs_dir, aggregated_file), 'r') as aggregated:
                    reader = csv.DictReader(aggregated)
                    row_dict = OrderedDict()
                    # Fold every run row into column -> list-of-values.
                    for row in reader:
                        for field in reader.fieldnames:
                            if field in row_dict:
                                row_dict[field].append(row[field])
                            else:
                                row_dict[field] = [row[field]]
                    return OrderedDict(row_dict)
        # No aggregated.csv found: return an empty mapping rather than the
        # implicit None, so callers doing row.update(...) do not crash.
        return OrderedDict()

    def add_rows(self, row, temp_row, rows, dir):
        """Retrieves the list values in the key-value pairs from get_aggregated_runs_subject and creates n rows for list of size n"""
        # One file per run plus aggregated.csv is assumed to live in the
        # monsoon directory, hence the -1. TODO(review): confirm no other
        # files can appear there.
        repetition_count = len(os.listdir(os.path.join(dir, 'monsoon'))) - 1
        for i in range(repetition_count):
            temp_row.update({self.data_points[0]: row[self.data_points[0]][i],
                             self.data_points[1]: row[self.data_points[1]][i],
                             self.data_points[2]: row[self.data_points[2]][i]})
            rows.append(temp_row.copy())
        return rows