// SPDX-License-Identifier: GPL-2.0
/*
 * non-coherent cache functions for Andes AX45MP
 *
 * Copyright (C) 2023 Renesas Electronics Corp.
 */

#include <linux/cacheflush.h>
#include <linux/cacheinfo.h>
#include <linux/dma-direction.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>

#include <asm/dma-noncoherent.h>
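
/*
 * The AX45MP exposes two CCTL (cache control) interfaces that are used
 * here: custom CSRs on each CPU for L1 D-cache operations by virtual
 * address, and per-core MMIO registers in the L2 cache controller for L2
 * operations by physical address. Both are driven one cache line at a time.
 */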

/* L2 cache registers */
#define AX45MP_L2C_REG_CTL_OFFSET		0x8

#define AX45MP_L2C_REG_C0_CMD_OFFSET		0x40
#define AX45MP_L2C_REG_C0_ACC_OFFSET		0x48
#define AX45MP_L2C_REG_STATUS_OFFSET		0x80

/* D-cache operation */
#define AX45MP_CCTL_L1D_VA_INVAL		0 /* Invalidate an L1 cache entry */
#define AX45MP_CCTL_L1D_VA_WB			1 /* Write-back an L1 cache entry */

/* L2 CCTL status */
#define AX45MP_CCTL_L2_STATUS_IDLE		0

/* L2 CCTL status cores mask */
#define AX45MP_CCTL_L2_STATUS_C0_MASK		0xf

/* L2 cache operation */
#define AX45MP_CCTL_L2_PA_INVAL			0x8 /* Invalidate an L2 cache entry */
#define AX45MP_CCTL_L2_PA_WB			0x9 /* Write-back an L2 cache entry */

#define AX45MP_L2C_REG_PER_CORE_OFFSET		0x10
#define AX45MP_CCTL_L2_STATUS_PER_CORE_OFFSET	4

#define AX45MP_L2C_REG_CN_CMD_OFFSET(n)	\
	(AX45MP_L2C_REG_C0_CMD_OFFSET + ((n) * AX45MP_L2C_REG_PER_CORE_OFFSET))
#define AX45MP_L2C_REG_CN_ACC_OFFSET(n)	\
	(AX45MP_L2C_REG_C0_ACC_OFFSET + ((n) * AX45MP_L2C_REG_PER_CORE_OFFSET))
#define AX45MP_CCTL_L2_STATUS_CN_MASK(n)	\
	(AX45MP_CCTL_L2_STATUS_C0_MASK << ((n) * AX45MP_CCTL_L2_STATUS_PER_CORE_OFFSET))
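
/*
 * For example, with the 0x10-byte register stride and 4-bit status fields
 * above, core 1 uses the command register at 0x40 + 0x10 = 0x50, the
 * access register at 0x48 + 0x10 = 0x58, and status mask 0xf << 4 = 0xf0.
 */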

/* Andes custom CSRs used to issue L1 CCTL operations from the CPU */
#define AX45MP_CCTL_REG_UCCTLBEGINADDR_NUM	0x80b
#define AX45MP_CCTL_REG_UCCTLCOMMAND_NUM	0x80c

/* The only line size this driver supports; the DT value is checked against it */
#define AX45MP_CACHE_LINE_SIZE			64

struct ax45mp_priv {
	void __iomem *l2c_base;
	u32 ax45mp_cache_line_size;
};

static struct ax45mp_priv ax45mp_priv;

/* L2 Cache operations */
static inline u32 ax45mp_cpu_l2c_get_cctl_status(void)
{
	return readl(ax45mp_priv.l2c_base + AX45MP_L2C_REG_STATUS_OFFSET);
}

/*
 * Walk [start, end) one cache line at a time, issuing the given L1 CCTL
 * operation by virtual address and the given L2 CCTL operation by physical
 * address, polling until the L2 command issued for this core completes.
 */
static void ax45mp_cpu_cache_operation(unsigned long start, unsigned long end,
				       unsigned int l1_op, unsigned int l2_op)
{
	unsigned long line_size = ax45mp_priv.ax45mp_cache_line_size;
	void __iomem *base = ax45mp_priv.l2c_base;
	int mhartid = smp_processor_id();	/* logical CPU index into the per-core L2C registers */
	unsigned long pa;

	while (end > start) {
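		/* L1: operate on the line by virtual address via the custom CSRs */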
		csr_write(AX45MP_CCTL_REG_UCCTLBEGINADDR_NUM, start);
		csr_write(AX45MP_CCTL_REG_UCCTLCOMMAND_NUM, l1_op);

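		/* L2: operate on the line by physical address via this core's MMIO registers */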
		pa = virt_to_phys((void *)start);
		writel(pa, base + AX45MP_L2C_REG_CN_ACC_OFFSET(mhartid));
		writel(l2_op, base + AX45MP_L2C_REG_CN_CMD_OFFSET(mhartid));
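		/* wait for this core's L2 CCTL command to drain back to idle */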
		while ((ax45mp_cpu_l2c_get_cctl_status() &
			AX45MP_CCTL_L2_STATUS_CN_MASK(mhartid)) !=
			AX45MP_CCTL_L2_STATUS_IDLE)
			;

		start += line_size;
	}
}

/* Write-back L1 and L2 cache entry */
static inline void ax45mp_cpu_dcache_wb_range(unsigned long start, unsigned long end)
{
	ax45mp_cpu_cache_operation(start, end, AX45MP_CCTL_L1D_VA_WB,
				   AX45MP_CCTL_L2_PA_WB);
}

/* Invalidate the L1 and L2 cache entry */
static inline void ax45mp_cpu_dcache_inval_range(unsigned long start, unsigned long end)
{
	ax45mp_cpu_cache_operation(start, end, AX45MP_CCTL_L1D_VA_INVAL,
				   AX45MP_CCTL_L2_PA_INVAL);
}

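/*
 * Invalidate [paddr, paddr + size): the range is widened to whole cache
 * lines, and interrupts are kept off so the per-core CCTL registers and
 * the status poll cannot be interleaved with another operation on this CPU.
 */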
static void ax45mp_dma_cache_inv(phys_addr_t paddr, size_t size)
{
	unsigned long start = (unsigned long)phys_to_virt(paddr);
	unsigned long end = start + size;
	unsigned long line_size;
	unsigned long flags;

	if (unlikely(start == end))
		return;

	line_size = ax45mp_priv.ax45mp_cache_line_size;

	start = start & (~(line_size - 1));
	end = ((end + line_size - 1) & (~(line_size - 1)));

	local_irq_save(flags);

	ax45mp_cpu_dcache_inval_range(start, end);

	local_irq_restore(flags);
}

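/*
 * Write back [paddr, paddr + size): only the start is aligned down here;
 * the line-sized strides in ax45mp_cpu_cache_operation run until they pass
 * 'end', which covers a partial trailing line as well.
 */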
static void ax45mp_dma_cache_wback(phys_addr_t paddr, size_t size)
{
	unsigned long start = (unsigned long)phys_to_virt(paddr);
	unsigned long end = start + size;
	unsigned long line_size;
	unsigned long flags;

	line_size = ax45mp_priv.ax45mp_cache_line_size;
	start = start & (~(line_size - 1));
	local_irq_save(flags);
	ax45mp_cpu_dcache_wb_range(start, end);
	local_irq_restore(flags);
}

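/* Write back first so dirty lines are not discarded by the invalidate. */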
static void ax45mp_dma_cache_wback_inv(phys_addr_t paddr, size_t size)
{
	ax45mp_dma_cache_wback(paddr, size);
	ax45mp_dma_cache_inv(paddr, size);
}

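/* Read the "cache-line-size" DT property; only 64-byte lines are supported. */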
static int ax45mp_get_l2_line_size(struct device_node *np)
{
	int ret;

	ret = of_property_read_u32(np, "cache-line-size", &ax45mp_priv.ax45mp_cache_line_size);
	if (ret) {
		pr_err("Failed to get cache-line-size\n");
		return ret;
	}

	if (ax45mp_priv.ax45mp_cache_line_size != AX45MP_CACHE_LINE_SIZE) {
		pr_err("Expected cache-line-size to be 64 bytes (found:%u)\n",
		       ax45mp_priv.ax45mp_cache_line_size);
		return -EINVAL;
	}

	return 0;
}

static const struct riscv_nonstd_cache_ops ax45mp_cmo_ops __initdata = {
	.wback = &ax45mp_dma_cache_wback,
	.inv = &ax45mp_dma_cache_inv,
	.wback_inv = &ax45mp_dma_cache_wback_inv,
};
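
/*
 * Keeping the ops in __initdata assumes the registration helper copies
 * them rather than holding this pointer; that is an assumption about the
 * generic RISC-V non-coherent support, not something enforced here.
 */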

static const struct of_device_id ax45mp_cache_ids[] = {
	{ .compatible = "andestech,ax45mp-cache" },
	{ /* sentinel */ }
};
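
/*
 * Illustrative sketch of a matching device-tree node, limited to the
 * properties this driver actually consumes (the address is made up; see
 * the "andestech,ax45mp-cache" binding for the authoritative schema):
 *
 *	cache-controller@13400000 {
 *		compatible = "andestech,ax45mp-cache", "cache";
 *		reg = <0x13400000 0x100000>;
 *		cache-line-size = <64>;
 *	};
 */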

static int __init ax45mp_cache_init(void)
{
	struct device_node *np;
	struct resource res;
	int ret;

	np = of_find_matching_node(NULL, ax45mp_cache_ids);
	if (!of_device_is_available(np))
		return -ENODEV;

	ret = of_address_to_resource(np, 0, &res);
	if (ret)
		return ret;

	/*
	 * If IOCP is present on the Andes AX45MP core, riscv_cbom_block_size
	 * will be 0, so we can rely on it: when it is 0 the hardware is
	 * already DMA-coherent and no software CMO handling is needed, so we
	 * simply return success here. Only when it is set do we continue
	 * further in the probe path.
	 */
	if (!riscv_cbom_block_size)
		return 0;

	ax45mp_priv.l2c_base = ioremap(res.start, resource_size(&res));
	if (!ax45mp_priv.l2c_base)
		return -ENOMEM;

	ret = ax45mp_get_l2_line_size(np);
	if (ret) {
		iounmap(ax45mp_priv.l2c_base);
		return ret;
	}

	riscv_noncoherent_register_cache_ops(&ax45mp_cmo_ops);

	return 0;
}
early_initcall(ax45mp_cache_init);