GitHub Repository: torvalds/linux
Path: blob/master/drivers/cache/hisi_soc_hha.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for HiSilicon Hydra Home Agent (HHA).
 *
 * Copyright (c) 2025 HiSilicon Technologies Co., Ltd.
 * Author: Yicong Yang <[email protected]>
 *         Yushan Wang <[email protected]>
 *
 * A system typically contains multiple HHAs. Each is responsible for a subset
 * of the physical addresses in the system, but interleaving can make the
 * mapping from a particular cache line to a responsible HHA complex. As such
 * no filtering is done in the driver, with the hardware being responsible for
 * responding with success even if it was not responsible for any addresses
 * in the range on which the operation was requested.
 */

#include <linux/bitfield.h>
#include <linux/cache_coherency.h>
#include <linux/dev_printk.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/memregion.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>

#define HISI_HHA_CTRL			0x5004
#define HISI_HHA_CTRL_EN		BIT(0)
#define HISI_HHA_CTRL_RANGE		BIT(1)
#define HISI_HHA_CTRL_TYPE		GENMASK(3, 2)
#define HISI_HHA_START_L		0x5008
#define HISI_HHA_START_H		0x500c
#define HISI_HHA_LEN_L			0x5010
#define HISI_HHA_LEN_H			0x5014

/* Maintenance operations are performed at a 128 byte granularity */
#define HISI_HHA_MAINT_ALIGN		128

/* Interval and timeout for polling HISI_HHA_CTRL_EN to clear on completion */
#define HISI_HHA_POLL_GAP_US		10
#define HISI_HHA_POLL_TIMEOUT_US	50000

struct hisi_soc_hha {
	/* Must be first element */
	struct cache_coherency_ops_inst cci;
	/* Locks HHA instance to forbid overlapping access. */
	struct mutex lock;
	void __iomem *base;
};

static bool hisi_hha_cache_maintain_wait_finished(struct hisi_soc_hha *soc_hha)
{
	u32 val;

	return !readl_poll_timeout_atomic(soc_hha->base + HISI_HHA_CTRL, val,
					  !(val & HISI_HHA_CTRL_EN),
					  HISI_HHA_POLL_GAP_US,
					  HISI_HHA_POLL_TIMEOUT_US);
}

static int hisi_soc_hha_wbinv(struct cache_coherency_ops_inst *cci,
			      struct cc_inval_params *invp)
{
	struct hisi_soc_hha *soc_hha =
		container_of(cci, struct hisi_soc_hha, cci);
	phys_addr_t top, addr = invp->addr;
	size_t size = invp->size;
	u32 reg;

	if (!size)
		return -EINVAL;

	addr = ALIGN_DOWN(addr, HISI_HHA_MAINT_ALIGN);
	top = ALIGN(addr + size, HISI_HHA_MAINT_ALIGN);
	size = top - addr;

	guard(mutex)(&soc_hha->lock);

	if (!hisi_hha_cache_maintain_wait_finished(soc_hha))
		return -EBUSY;

	/*
	 * Hardware will search for addresses ranging [addr, addr + size - 1],
	 * last byte included, and perform maintenance in 128 byte granules
	 * on those cachelines which contain the addresses. If a given instance
	 * is either not responsible for a cacheline or that cacheline is not
	 * currently present then the search will fail, no operation will be
	 * necessary and the device will report success.
	 */
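	/*
	 * Worked example (illustrative values, not from the original source):
	 * for a request with addr = 0x1044 and size = 0x50, the alignment
	 * above yields addr = 0x1000, top = ALIGN(0x1000 + 0x50, 128) =
	 * 0x1080 and size = 0x80, so the hardware is programmed below with
	 * START = 0x1000 and LEN = 0x7f (the inclusive length, size - 1).
	 */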
	size -= 1;

	writel(lower_32_bits(addr), soc_hha->base + HISI_HHA_START_L);
	writel(upper_32_bits(addr), soc_hha->base + HISI_HHA_START_H);
	writel(lower_32_bits(size), soc_hha->base + HISI_HHA_LEN_L);
	writel(upper_32_bits(size), soc_hha->base + HISI_HHA_LEN_H);

	reg = FIELD_PREP(HISI_HHA_CTRL_TYPE, 1); /* Clean Invalid */
	reg |= HISI_HHA_CTRL_RANGE | HISI_HHA_CTRL_EN;
	writel(reg, soc_hha->base + HISI_HHA_CTRL);

	return 0;
}

static int hisi_soc_hha_done(struct cache_coherency_ops_inst *cci)
{
	struct hisi_soc_hha *soc_hha =
		container_of(cci, struct hisi_soc_hha, cci);

	guard(mutex)(&soc_hha->lock);
	if (!hisi_hha_cache_maintain_wait_finished(soc_hha))
		return -ETIMEDOUT;

	return 0;
}

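/*
 * A minimal sketch of how these callbacks appear to be used, inferred from
 * this driver rather than stated by the coherency core: ->wbinv() kicks off
 * a ranged clean + invalidate on one HHA instance and ->done() polls until
 * that instance has finished the operation.
 */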
static const struct cache_coherency_ops hha_ops = {
	.wbinv = hisi_soc_hha_wbinv,
	.done = hisi_soc_hha_done,
};

static int hisi_soc_hha_probe(struct platform_device *pdev)
{
	struct hisi_soc_hha *soc_hha;
	struct resource *mem;
	int ret;

	soc_hha = cache_coherency_ops_instance_alloc(&hha_ops,
						     struct hisi_soc_hha, cci);
	if (!soc_hha)
		return -ENOMEM;

	platform_set_drvdata(pdev, soc_hha);

	mutex_init(&soc_hha->lock);

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem) {
		ret = -ENOMEM;
		goto err_free_cci;
	}

	soc_hha->base = ioremap(mem->start, resource_size(mem));
	if (!soc_hha->base) {
		ret = dev_err_probe(&pdev->dev, -ENOMEM,
				    "failed to remap io memory");
		goto err_free_cci;
	}

	ret = cache_coherency_ops_instance_register(&soc_hha->cci);
	if (ret)
		goto err_iounmap;

	return 0;

err_iounmap:
	iounmap(soc_hha->base);
err_free_cci:
	cache_coherency_ops_instance_put(&soc_hha->cci);
	return ret;
}

static void hisi_soc_hha_remove(struct platform_device *pdev)
{
	struct hisi_soc_hha *soc_hha = platform_get_drvdata(pdev);

	cache_coherency_ops_instance_unregister(&soc_hha->cci);
	iounmap(soc_hha->base);
	cache_coherency_ops_instance_put(&soc_hha->cci);
}

static const struct acpi_device_id hisi_soc_hha_ids[] = {
	{ "HISI0511", },
	{ }
};
MODULE_DEVICE_TABLE(acpi, hisi_soc_hha_ids);

static struct platform_driver hisi_soc_hha_driver = {
	.driver = {
		.name = "hisi_soc_hha",
		.acpi_match_table = hisi_soc_hha_ids,
	},
	.probe = hisi_soc_hha_probe,
	.remove = hisi_soc_hha_remove,
};

module_platform_driver(hisi_soc_hha_driver);

MODULE_IMPORT_NS("CACHE_COHERENCY");
MODULE_DESCRIPTION("HiSilicon Hydra Home Agent driver supporting cache maintenance");
MODULE_AUTHOR("Yicong Yang <[email protected]>");
MODULE_AUTHOR("Yushan Wang <[email protected]>");
MODULE_LICENSE("GPL");