GitHub Repository: torvalds/linux
Path: blob/master/drivers/bus/mhi/ep/ring.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2022 Linaro Ltd.
 * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
 */

#include <linux/mhi_ep.h>
#include "internal.h"
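
/*
 * Translate a host ring address (e.g. a doorbell value) into an element
 * offset from the ring base.
 */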
size_t mhi_ep_ring_addr2offset(struct mhi_ep_ring *ring, u64 ptr)
{
	return (ptr - ring->rbase) / sizeof(struct mhi_ring_element);
}
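
/*
 * Read the ring length from the shared ring context (little-endian, MMIO)
 * and convert it to a count of ring elements.
 */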
static u32 mhi_ep_ring_num_elems(struct mhi_ep_ring *ring)
{
	__le64 rlen;

	memcpy_fromio(&rlen, (void __iomem *) &ring->ring_ctx->generic.rlen, sizeof(u64));

	return le64_to_cpu(rlen) / sizeof(struct mhi_ring_element);
}
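
/* Advance the local read offset by one element, wrapping at the ring end */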
void mhi_ep_ring_inc_index(struct mhi_ep_ring *ring)
{
	ring->rd_offset = (ring->rd_offset + 1) % ring->ring_size;
}
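
/*
 * Copy host ring elements into the local ring cache, from the current write
 * offset up to 'end'. If the region wraps past the end of the ring, the copy
 * is split into two reads: one up to the ring boundary and one from the ring
 * base.
 */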
static int __mhi_ep_cache_ring(struct mhi_ep_ring *ring, size_t end)
{
	struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct mhi_ep_buf_info buf_info = {};
	size_t start;
	int ret;

	/* Don't proceed in the case of event ring. This happens during mhi_ep_ring_start(). */
	if (ring->type == RING_TYPE_ER)
		return 0;

	/* No need to cache the ring if write pointer is unmodified */
	if (ring->wr_offset == end)
		return 0;

	start = ring->wr_offset;
	if (start < end) {
		buf_info.size = (end - start) * sizeof(struct mhi_ring_element);
		buf_info.host_addr = ring->rbase + (start * sizeof(struct mhi_ring_element));
		buf_info.dev_addr = &ring->ring_cache[start];

		ret = mhi_cntrl->read_sync(mhi_cntrl, &buf_info);
		if (ret < 0)
			return ret;
	} else {
		buf_info.size = (ring->ring_size - start) * sizeof(struct mhi_ring_element);
		buf_info.host_addr = ring->rbase + (start * sizeof(struct mhi_ring_element));
		buf_info.dev_addr = &ring->ring_cache[start];

		ret = mhi_cntrl->read_sync(mhi_cntrl, &buf_info);
		if (ret < 0)
			return ret;

		if (end) {
			buf_info.host_addr = ring->rbase;
			buf_info.dev_addr = &ring->ring_cache[0];
			buf_info.size = end * sizeof(struct mhi_ring_element);

			ret = mhi_cntrl->read_sync(mhi_cntrl, &buf_info);
			if (ret < 0)
				return ret;
		}
	}

	dev_dbg(dev, "Cached ring: start %zu end %zu size %zu\n", start, end, buf_info.size);

	return 0;
}
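
/* Cache the host ring up to 'wr_ptr', then record the new write offset */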
static int mhi_ep_cache_ring(struct mhi_ep_ring *ring, u64 wr_ptr)
{
	size_t wr_offset;
	int ret;

	wr_offset = mhi_ep_ring_addr2offset(ring, wr_ptr);

	/* Cache the host ring till write offset */
	ret = __mhi_ep_cache_ring(ring, wr_offset);
	if (ret)
		return ret;

	ring->wr_offset = wr_offset;

	return 0;
}
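
/*
 * Fetch the latest write pointer from the doorbell register and cache any
 * ring elements the host has written since the last update.
 */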
int mhi_ep_update_wr_offset(struct mhi_ep_ring *ring)
{
	u64 wr_ptr;

	wr_ptr = mhi_ep_mmio_get_db(ring);

	return mhi_ep_cache_ring(ring, wr_ptr);
}
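
/*
 * Write a single element into the host ring at the current read offset and
 * publish the updated ring pointer back to the ring context. Here the
 * endpoint is the producer (e.g. when sending events to the host).
 */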
/* TODO: Support for adding multiple ring elements to the ring */
int mhi_ep_ring_add_element(struct mhi_ep_ring *ring, struct mhi_ring_element *el)
{
	struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct mhi_ep_buf_info buf_info = {};
	size_t old_offset = 0;
	u32 num_free_elem;
	__le64 rp;
	int ret;

	ret = mhi_ep_update_wr_offset(ring);
	if (ret) {
		dev_err(dev, "Error updating write pointer\n");
		return ret;
	}

	if (ring->rd_offset < ring->wr_offset)
		num_free_elem = (ring->wr_offset - ring->rd_offset) - 1;
	else
		num_free_elem = ((ring->ring_size - ring->rd_offset) + ring->wr_offset) - 1;

	/* Check if there is space in ring for adding at least an element */
	if (!num_free_elem) {
		dev_err(dev, "No space left in the ring\n");
		return -ENOSPC;
	}

	old_offset = ring->rd_offset;

	dev_dbg(dev, "Adding an element to ring at offset (%zu)\n", ring->rd_offset);
	buf_info.host_addr = ring->rbase + (old_offset * sizeof(*el));
	buf_info.dev_addr = el;
	buf_info.size = sizeof(*el);

	ret = mhi_cntrl->write_sync(mhi_cntrl, &buf_info);
	if (ret)
		return ret;

	mhi_ep_ring_inc_index(ring);

	/* Update rp in ring context */
	rp = cpu_to_le64(ring->rd_offset * sizeof(*el) + ring->rbase);
	memcpy_toio((void __iomem *) &ring->ring_ctx->generic.rp, &rp, sizeof(u64));

	return ret;
}
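
/*
 * Record the ring type and the doorbell register offsets for a command,
 * channel or event ring; channel rings also record the channel ID.
 */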
void mhi_ep_ring_init(struct mhi_ep_ring *ring, enum mhi_ep_ring_type type, u32 id)
{
	ring->type = type;
	if (ring->type == RING_TYPE_CMD) {
		ring->db_offset_h = EP_CRDB_HIGHER;
		ring->db_offset_l = EP_CRDB_LOWER;
	} else if (ring->type == RING_TYPE_CH) {
		ring->db_offset_h = CHDB_HIGHER_n(id);
		ring->db_offset_l = CHDB_LOWER_n(id);
		ring->ch_id = id;
	} else {
		ring->db_offset_h = ERDB_HIGHER_n(id);
		ring->db_offset_l = ERDB_LOWER_n(id);
	}
}
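
/*
 * Delayed work handler for interrupt moderation: raise the MSI for the event
 * ring, then clear the pending flag.
 */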
static void mhi_ep_raise_irq(struct work_struct *work)
{
	struct mhi_ep_ring *ring = container_of(work, struct mhi_ep_ring, intmodt_work.work);
	struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;

	mhi_cntrl->raise_irq(mhi_cntrl, ring->irq_vector);
	WRITE_ONCE(ring->irq_pending, false);
}
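
/*
 * Bring up a ring: read its base, size and pointers from the host-supplied
 * ring context, set up interrupt moderation for event rings, then allocate
 * the local ring cache and fill it with the current host ring contents.
 */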
int mhi_ep_ring_start(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring,
		      union mhi_ep_ring_ctx *ctx)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	__le64 val;
	int ret;

	ring->mhi_cntrl = mhi_cntrl;
	ring->ring_ctx = ctx;
	ring->ring_size = mhi_ep_ring_num_elems(ring);
	memcpy_fromio(&val, (void __iomem *) &ring->ring_ctx->generic.rbase, sizeof(u64));
	ring->rbase = le64_to_cpu(val);

	if (ring->type == RING_TYPE_CH)
		ring->er_index = le32_to_cpu(ring->ring_ctx->ch.erindex);

	if (ring->type == RING_TYPE_ER) {
		ring->irq_vector = le32_to_cpu(ring->ring_ctx->ev.msivec);
		ring->intmodt = FIELD_GET(EV_CTX_INTMODT_MASK,
					  le32_to_cpu(ring->ring_ctx->ev.intmod));

		INIT_DELAYED_WORK(&ring->intmodt_work, mhi_ep_raise_irq);
	}

	/* During ring init, both rp and wp are equal */
	memcpy_fromio(&val, (void __iomem *) &ring->ring_ctx->generic.rp, sizeof(u64));
	ring->rd_offset = mhi_ep_ring_addr2offset(ring, le64_to_cpu(val));
	ring->wr_offset = mhi_ep_ring_addr2offset(ring, le64_to_cpu(val));

	/* Allocate ring cache memory for holding the copy of host ring */
	ring->ring_cache = kcalloc(ring->ring_size, sizeof(struct mhi_ring_element), GFP_KERNEL);
	if (!ring->ring_cache)
		return -ENOMEM;

	memcpy_fromio(&val, (void __iomem *) &ring->ring_ctx->generic.wp, sizeof(u64));
	ret = mhi_ep_cache_ring(ring, le64_to_cpu(val));
	if (ret) {
		dev_err(dev, "Failed to cache ring\n");
		kfree(ring->ring_cache);
		return ret;
	}

	ring->started = true;

	return 0;
}
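
/*
 * Tear down a ring: cancel pending interrupt moderation work (event rings)
 * and free the local ring cache.
 */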
void mhi_ep_ring_reset(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring)
{
	if (ring->type == RING_TYPE_ER)
		cancel_delayed_work_sync(&ring->intmodt_work);

	ring->started = false;
	kfree(ring->ring_cache);
	ring->ring_cache = NULL;
}
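
/*
 * A minimal sketch of how the endpoint core is expected to drive these
 * helpers for a command ring. 'mhi_cntrl' and 'ctx' are assumed to come
 * from the controller registration path; error handling is elided:
 *
 *	struct mhi_ep_ring ring;
 *	int ret;
 *
 *	mhi_ep_ring_init(&ring, RING_TYPE_CMD, 0);
 *	ret = mhi_ep_ring_start(mhi_cntrl, &ring, ctx);
 *	if (ret)
 *		return ret;
 *
 *	// ... consume elements from ring.ring_cache, advancing with
 *	// mhi_ep_ring_inc_index() ...
 *
 *	mhi_ep_ring_reset(mhi_cntrl, &ring);
 */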