Book a Demo!
CoCalc Logo Icon
Store · Features · Docs · Share · Support · News · About · Policies · Sign Up · Sign In
jantic
GitHub Repository: jantic/deoldify
Path: blob/master/fastai/callbacks/mem.py
781 views
1
" Memory profiling callbacks "
2
3
import tracemalloc, threading, torch, time
4
from ..utils.mem import *
5
from ..basic_train import *
6
from ..torch_core import *
7
from ..utils.pynvml_gate import *
8
9
if use_gpu: pynvml = load_pynvml_env()
10
11
class PeakMemMetric(LearnerCallback):
    "Callback that measures used and peaked general and GPU memory."

    # Per-epoch reporting works as follows:
    #  - CPU (RAM) usage is traced with `tracemalloc` between epoch begin/end.
    #  - GPU peak usage is sampled by a daemon thread (`peak_monitor_func`)
    #    polling pynvml roughly every 1ms while the epoch runs.
    # Coordination between the main thread and the sampler is done through
    # plain instance attributes (`peak_monitoring`, `gpu_mem_used_peak`)
    # without locks; the comments below note where that matters.

    _order=-20 # Needs to run before the recorder

    def __init__(self, learn:Learner):
        # Requires CUDA: the per-epoch metrics include GPU deltas sampled
        # via pynvml, so there is nothing meaningful to report on CPU-only.
        super().__init__(learn)
        assert torch.cuda.is_available(), "pytorch CUDA is required"
        preload_pytorch()

    def peak_monitor_start(self):
        "Begin RAM tracing and launch the GPU peak-sampling thread for this epoch."
        # Flag must be set before the thread starts, since the thread's loop
        # exits as soon as it observes `peak_monitoring == False`.
        self.peak_monitoring = True

        # start RAM tracing
        tracemalloc.start()

        # this thread samples RAM usage as long as the current epoch of the fit loop is running
        # NOTE(review): the comment above says RAM, but the thread actually
        # samples GPU memory (see `peak_monitor_func`); RAM is handled by
        # tracemalloc. Daemon=True so a hung sampler never blocks interpreter exit.
        peak_monitor_thread = threading.Thread(target=self.peak_monitor_func)
        peak_monitor_thread.daemon = True
        peak_monitor_thread.start()

    def peak_monitor_stop(self):
        "Stop RAM tracing and signal the sampling thread to finish."
        # Caller (`on_epoch_end`) reads tracemalloc's counters *before* this
        # runs — do not reorder these calls relative to that read.
        tracemalloc.stop()
        self.peak_monitoring = False

    def peak_monitor_func(self):
        "Thread body: poll GPU memory usage and track the peak until told to stop."
        # Sentinel: -1 guarantees the first real sample wins the max() below.
        self.gpu_mem_used_peak = -1

        gpu_id = torch.cuda.current_device()
        gpu_handle = pynvml.nvmlDeviceGetHandleByIndex(gpu_id)

        while True:
            gpu_mem_used = gpu_mem_get_used_fast(gpu_handle)
            self.gpu_mem_used_peak = max(gpu_mem_used, self.gpu_mem_used_peak)
            # Check the flag *after* sampling so at least one sample is taken
            # even if the epoch finishes almost immediately.
            if not self.peak_monitoring: break
            time.sleep(0.001) # 1msec

    def on_train_begin(self, **kwargs):
        # Register the four metric columns this callback appends each epoch.
        self.learn.recorder.add_metric_names(['cpu used', 'peak', 'gpu used', 'peak'])

    def on_epoch_begin(self, **kwargs):
        # Snapshot GPU usage at epoch start so epoch-end values are deltas.
        self.peak_monitor_start()
        self.gpu_before = gpu_mem_get_used_no_cache()

    def on_epoch_end(self, last_metrics, **kwargs):
        "Compute CPU/GPU used+peak deltas for the epoch and append them to the metrics."
        # Read traced RAM (current, peak) in bytes and convert to MBs; this
        # must happen before `peak_monitor_stop()` calls tracemalloc.stop().
        cpu_used, cpu_peak = list(map(lambda x: int(x/2**20), tracemalloc.get_traced_memory()))
        self.peak_monitor_stop()
        # NOTE(review): assumes gpu_mem_get_used_no_cache / _fast return MBs —
        # consistent with the "deltas in MBs" comment below; confirm in utils.mem.
        gpu_used = gpu_mem_get_used_no_cache() - self.gpu_before
        gpu_peak = self.gpu_mem_used_peak - self.gpu_before
        # can be negative, due to unreliable peak monitor thread
        if gpu_peak < 0: gpu_peak = 0
        # since we want the overhead only, subtract delta used if it's positive
        elif gpu_used > 0: gpu_peak -= gpu_used
        # The numbers are deltas in MBs (beginning of the epoch and the end)
        return add_metrics(last_metrics, [cpu_used, cpu_peak, gpu_used, gpu_peak])