Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/fs/ceph/subvolume_metrics.h
170852 views
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _FS_CEPH_SUBVOLUME_METRICS_H
#define _FS_CEPH_SUBVOLUME_METRICS_H

#include <linux/types.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/ktime.h>
#include <linux/atomic.h>

struct seq_file;
struct ceph_mds_client;
struct ceph_inode_info;
/**
 * struct ceph_subvol_metric_snapshot - Point-in-time snapshot of subvolume metrics
 * @subvolume_id: Subvolume identifier (inode number of subvolume root)
 * @read_ops: Number of read operations since last snapshot
 * @write_ops: Number of write operations since last snapshot
 * @read_bytes: Total bytes read since last snapshot
 * @write_bytes: Total bytes written since last snapshot
 * @read_latency_us: Sum of read latencies in microseconds (for avg calculation)
 * @write_latency_us: Sum of write latencies in microseconds (for avg calculation)
 *
 * Instances are handed out as a heap-allocated array by
 * ceph_subvolume_metrics_snapshot() and must be released with
 * ceph_subvolume_metrics_free_snapshot().
 *
 * Latency fields are running sums, not averages; divide by the matching
 * *_ops counter to obtain the mean latency.
 */
struct ceph_subvol_metric_snapshot {
	u64 subvolume_id;
	u64 read_ops;
	u64 write_ops;
	u64 read_bytes;
	u64 write_bytes;
	u64 read_latency_us;
	u64 write_latency_us;
};
/**
 * struct ceph_subvolume_metrics_tracker - Tracks per-subvolume I/O metrics
 * @lock: Protects @tree and @nr_entries during concurrent access
 * @tree: Red-black tree of per-subvolume entries, keyed by subvolume_id
 * @nr_entries: Number of entries currently in @tree
 * @enabled: Whether collection is enabled (requires MDS feature support)
 * @snapshot_attempts: Debug counter: total ceph_subvolume_metrics_snapshot() calls
 * @snapshot_empty: Debug counter: snapshots that found no data to report
 * @snapshot_failures: Debug counter: snapshots that failed to allocate memory
 * @record_calls: Debug counter: total ceph_subvolume_metrics_record() calls
 * @record_disabled: Debug counter: record calls skipped because disabled
 * @record_no_subvol: Debug counter: record calls skipped (no subvolume_id)
 * @total_read_ops: Cumulative read ops across all snapshots (never reset)
 * @total_read_bytes: Cumulative bytes read across all snapshots (never reset)
 * @total_write_ops: Cumulative write ops across all snapshots (never reset)
 * @total_write_bytes: Cumulative bytes written across all snapshots (never reset)
 *
 * The debug/cumulative counters are atomic64_t so they can be bumped
 * without taking @lock; only @tree and @nr_entries require it.
 */
struct ceph_subvolume_metrics_tracker {
	spinlock_t lock;
	struct rb_root_cached tree;
	u32 nr_entries;
	bool enabled;
	atomic64_t snapshot_attempts;
	atomic64_t snapshot_empty;
	atomic64_t snapshot_failures;
	atomic64_t record_calls;
	atomic64_t record_disabled;
	atomic64_t record_no_subvol;
	atomic64_t total_read_ops;
	atomic64_t total_read_bytes;
	atomic64_t total_write_ops;
	atomic64_t total_write_bytes;
};
/* Tracker lifecycle: initialize an embedded tracker / tear it down again. */
void ceph_subvolume_metrics_init(struct ceph_subvolume_metrics_tracker *tracker);
void ceph_subvolume_metrics_destroy(struct ceph_subvolume_metrics_tracker *tracker);

/* Turn collection on or off; while off, record calls are skipped. */
void ceph_subvolume_metrics_enable(struct ceph_subvolume_metrics_tracker *tracker,
				   bool enable);

/*
 * Account one I/O of @size bytes with latency @latency_us against
 * @subvol_id; @is_write selects the read vs. write counters.
 */
void ceph_subvolume_metrics_record(struct ceph_subvolume_metrics_tracker *tracker,
				   u64 subvol_id, bool is_write,
				   size_t size, u64 latency_us);

/*
 * Produce an array of per-subvolume snapshots in *@out with *@nr entries;
 * caller frees with ceph_subvolume_metrics_free_snapshot().
 * NOTE(review): @consume presumably resets the per-interval counters
 * ("since last snapshot" in the struct doc) — confirm against the
 * implementation, as is the error-return convention (likely negative errno).
 */
int ceph_subvolume_metrics_snapshot(struct ceph_subvolume_metrics_tracker *tracker,
				    struct ceph_subvol_metric_snapshot **out,
				    u32 *nr, bool consume);
void ceph_subvolume_metrics_free_snapshot(struct ceph_subvol_metric_snapshot *snapshot);

/* Write a human-readable dump of the tracker state to @s. */
void ceph_subvolume_metrics_dump(struct ceph_subvolume_metrics_tracker *tracker,
				 struct seq_file *s);

/*
 * Convenience entry point for the I/O paths: resolves the tracker and
 * subvolume from @mdsc/@ci and records @bytes with latency derived from
 * @start..@end — assumes end >= start; verify in the implementation.
 */
void ceph_subvolume_metrics_record_io(struct ceph_mds_client *mdsc,
				      struct ceph_inode_info *ci,
				      bool is_write, size_t bytes,
				      ktime_t start, ktime_t end);
static inline bool ceph_subvolume_metrics_enabled(
89
const struct ceph_subvolume_metrics_tracker *tracker)
90
{
91
return READ_ONCE(tracker->enabled);
92
}
93
94
/*
 * Module-lifetime setup/teardown of the allocator cache backing the
 * per-subvolume tree entries (assumed a kmem_cache — confirm in the .c
 * file).  cache_init() is __init and returns 0 on success.
 */
int __init ceph_subvolume_metrics_cache_init(void);
void ceph_subvolume_metrics_cache_destroy(void);
#endif /* _FS_CEPH_SUBVOLUME_METRICS_H */