target/linux/lantiq/patches-2.6.32/260-ar9-cache-split.patch

--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1653,6 +1653,28 @@
     help
       IFX included extensions in APRP
 
+config IFX_VPE_CACHE_SPLIT
+    bool "IFX Cache Split Ways"
+    depends on IFX_VPE_EXT
+    help
+      IFX extension for reserving (splitting) cache ways among VPEs. You
+      must pass the kernel command line arguments vpe_icache_shared=0 or
+      vpe_dcache_shared=0 to enable splitting of the icache or dcache,
+      respectively. You can then specify which cache ways should be
+      assigned to which VPE. There are 8 cache ways in total, 4 each
+      for the dcache and icache: dcache_way0, dcache_way1, dcache_way2,
+      dcache_way3 and icache_way0, icache_way1, icache_way2, icache_way3.
+
+      For example, if you specify vpe_icache_shared=0 and icache_way2=1,
+      then the 3rd icache way is assigned to VPE0 and denied in VPE1.
+
+      For the icache, software must keep at least one cache way available
+      to a VPE at all times, i.e. one cannot assign all the icache ways
+      to one VPE.
+
+      By default, vpe_dcache_shared and vpe_icache_shared are set to 1,
+      i.e. both the icache and dcache are shared among the VPEs.
+
 config PERFCTRS
     bool "34K Performance counters"
     depends on MIPS_MT && PROC_FS
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -129,6 +129,13 @@
 EXPORT_SYMBOL(vpe1_wdog_timeout);
 
 #endif
+
+#ifdef CONFIG_IFX_VPE_CACHE_SPLIT /* Code for splitting the cache ways among VPEs. */
+extern int vpe_icache_shared, vpe_dcache_shared;
+extern int icache_way0, icache_way1, icache_way2, icache_way3;
+extern int dcache_way0, dcache_way1, dcache_way2, dcache_way3;
+#endif
+
 /* grab the likely amount of memory we will need. */
 #ifdef CONFIG_MIPS_VPE_LOADER_TOM
 #define P_SIZE (2 * 1024 * 1024)
@@ -867,6 +874,65 @@
     /* enable this VPE */
     write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
 
+#ifdef CONFIG_IFX_VPE_CACHE_SPLIT
+    if (!vpe_icache_shared || !vpe_dcache_shared) {
+
+        /* PCP bit must be 1 to split the cache */
+        if (read_c0_mvpconf0() & MVPCONF0_PCP) {
+
+            if (!vpe_icache_shared) {
+                write_vpe_c0_vpeconf0((read_vpe_c0_vpeconf0()) & ~VPECONF0_ICS);
+
+                /*
+                 * If any cache way is 1, then that way is denied
+                 * in VPE1. Otherwise assign that way to VPE1.
+                 */
+                if (!icache_way0)
+                    write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() | VPEOPT_IWX0);
+                else
+                    write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() & ~VPEOPT_IWX0);
+                if (!icache_way1)
+                    write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() | VPEOPT_IWX1);
+                else
+                    write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() & ~VPEOPT_IWX1);
+                if (!icache_way2)
+                    write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() | VPEOPT_IWX2);
+                else
+                    write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() & ~VPEOPT_IWX2);
+                if (!icache_way3)
+                    write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() | VPEOPT_IWX3);
+                else
+                    write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() & ~VPEOPT_IWX3);
+            }
+
+            if (!vpe_dcache_shared) {
+                write_vpe_c0_vpeconf0((read_vpe_c0_vpeconf0()) & ~VPECONF0_DCS);
+
+                /*
+                 * If any cache way is 1, then that way is denied
+                 * in VPE1. Otherwise assign that way to VPE1.
+                 */
+                if (!dcache_way0)
+                    write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() | VPEOPT_DWX0);
+                else
+                    write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() & ~VPEOPT_DWX0);
+                if (!dcache_way1)
+                    write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() | VPEOPT_DWX1);
+                else
+                    write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() & ~VPEOPT_DWX1);
+                if (!dcache_way2)
+                    write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() | VPEOPT_DWX2);
+                else
+                    write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() & ~VPEOPT_DWX2);
+                if (!dcache_way3)
+                    write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() | VPEOPT_DWX3);
+                else
+                    write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() & ~VPEOPT_DWX3);
+            }
+        }
+    }
+#endif /* CONFIG_IFX_VPE_CACHE_SPLIT */
+
     /* clear out any left overs from a previous program */
     write_vpe_c0_status(0);
     write_vpe_c0_cause(0);
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -1348,6 +1348,106 @@
 __setup("coherentio", setcoherentio);
 #endif
 
+#ifdef CONFIG_IFX_VPE_CACHE_SPLIT /* Code for splitting the cache ways among VPEs. */
+
+#include <asm/mipsmtregs.h>
+
+/*
+ * By default, vpe_icache_shared and vpe_dcache_shared
+ * values are 1 i.e., both icache and dcache are shared
+ * among the VPEs.
+ */
+
+int vpe_icache_shared = 1;
+static int __init vpe_icache_shared_val(char *str)
+{
+    get_option(&str, &vpe_icache_shared);
+    return 1;
+}
+__setup("vpe_icache_shared=", vpe_icache_shared_val);
+EXPORT_SYMBOL(vpe_icache_shared);
+
+int vpe_dcache_shared = 1;
+static int __init vpe_dcache_shared_val(char *str)
+{
+    get_option(&str, &vpe_dcache_shared);
+    return 1;
+}
+__setup("vpe_dcache_shared=", vpe_dcache_shared_val);
+EXPORT_SYMBOL(vpe_dcache_shared);
+
+/*
+ * Software is required to make at least one icache
+ * way available to a VPE at all times, i.e. one
+ * cannot assign all the icache ways to one VPE.
+ */
+
+int icache_way0 = 0;
+static int __init icache_way0_val(char *str)
+{
+    get_option(&str, &icache_way0);
+    return 1;
+}
+__setup("icache_way0=", icache_way0_val);
+
+int icache_way1 = 0;
+static int __init icache_way1_val(char *str)
+{
+    get_option(&str, &icache_way1);
+    return 1;
+}
+__setup("icache_way1=", icache_way1_val);
+
+int icache_way2 = 0;
+static int __init icache_way2_val(char *str)
+{
+    get_option(&str, &icache_way2);
+    return 1;
+}
+__setup("icache_way2=", icache_way2_val);
+
+int icache_way3 = 0;
+static int __init icache_way3_val(char *str)
+{
+    get_option(&str, &icache_way3);
+    return 1;
+}
+__setup("icache_way3=", icache_way3_val);
+
+int dcache_way0 = 0;
+static int __init dcache_way0_val(char *str)
+{
+    get_option(&str, &dcache_way0);
+    return 1;
+}
+__setup("dcache_way0=", dcache_way0_val);
+
+int dcache_way1 = 0;
+static int __init dcache_way1_val(char *str)
+{
+    get_option(&str, &dcache_way1);
+    return 1;
+}
+__setup("dcache_way1=", dcache_way1_val);
+
+int dcache_way2 = 0;
+static int __init dcache_way2_val(char *str)
+{
+    get_option(&str, &dcache_way2);
+    return 1;
+}
+__setup("dcache_way2=", dcache_way2_val);
+
+int dcache_way3 = 0;
+static int __init dcache_way3_val(char *str)
+{
+    get_option(&str, &dcache_way3);
+    return 1;
+}
+__setup("dcache_way3=", dcache_way3_val);
+
+#endif /* CONFIG_IFX_VPE_CACHE_SPLIT */
+
 void __cpuinit r4k_cache_init(void)
 {
     extern void build_clear_page(void);
@@ -1367,6 +1467,78 @@
         break;
     }
 
+#ifdef CONFIG_IFX_VPE_CACHE_SPLIT
+    /*
+     * Split the cache ways among the VPEs based on the
+     * cache way values received as kernel command line
+     * arguments.
+     */
+    if (!vpe_icache_shared || !vpe_dcache_shared) {
+
+        /* PCP bit must be 1 to split the cache */
+        if (read_c0_mvpconf0() & MVPCONF0_PCP) {
+
+            /* Set CPA bit which enables us to modify VPEOpt register */
+            write_c0_mvpcontrol((read_c0_mvpcontrol()) | MVPCONTROL_CPA);
+
+            if (!vpe_icache_shared) {
+                write_c0_vpeconf0((read_c0_vpeconf0()) & ~VPECONF0_ICS);
+                /*
+                 * If any cache way is 1, then that way is denied
+                 * in VPE0. Otherwise assign that way to VPE0.
+                 */
+                printk(KERN_DEBUG "icache is split\n");
+                printk(KERN_DEBUG "icache_way0=%d icache_way1=%d icache_way2=%d icache_way3=%d\n",
+                       icache_way0, icache_way1, icache_way2, icache_way3);
+                if (icache_way0)
+                    write_c0_vpeopt(read_c0_vpeopt() | VPEOPT_IWX0);
+                else
+                    write_c0_vpeopt(read_c0_vpeopt() & ~VPEOPT_IWX0);
+                if (icache_way1)
+                    write_c0_vpeopt(read_c0_vpeopt() | VPEOPT_IWX1);
+                else
+                    write_c0_vpeopt(read_c0_vpeopt() & ~VPEOPT_IWX1);
+                if (icache_way2)
+                    write_c0_vpeopt(read_c0_vpeopt() | VPEOPT_IWX2);
+                else
+                    write_c0_vpeopt(read_c0_vpeopt() & ~VPEOPT_IWX2);
+                if (icache_way3)
+                    write_c0_vpeopt(read_c0_vpeopt() | VPEOPT_IWX3);
+                else
+                    write_c0_vpeopt(read_c0_vpeopt() & ~VPEOPT_IWX3);
+            }
+
+            if (!vpe_dcache_shared) {
+                /*
+                 * If any cache way is 1, then that way is denied
+                 * in VPE0. Otherwise assign that way to VPE0.
+                 */
+                printk(KERN_DEBUG "dcache is split\n");
+                printk(KERN_DEBUG "dcache_way0=%d dcache_way1=%d dcache_way2=%d dcache_way3=%d\n",
+                       dcache_way0, dcache_way1, dcache_way2, dcache_way3);
+                write_c0_vpeconf0((read_c0_vpeconf0()) & ~VPECONF0_DCS);
+                if (dcache_way0)
+                    write_c0_vpeopt(read_c0_vpeopt() | VPEOPT_DWX0);
+                else
+                    write_c0_vpeopt(read_c0_vpeopt() & ~VPEOPT_DWX0);
+                if (dcache_way1)
+                    write_c0_vpeopt(read_c0_vpeopt() | VPEOPT_DWX1);
+                else
+                    write_c0_vpeopt(read_c0_vpeopt() & ~VPEOPT_DWX1);
+                if (dcache_way2)
+                    write_c0_vpeopt(read_c0_vpeopt() | VPEOPT_DWX2);
+                else
+                    write_c0_vpeopt(read_c0_vpeopt() & ~VPEOPT_DWX2);
+                if (dcache_way3)
+                    write_c0_vpeopt(read_c0_vpeopt() | VPEOPT_DWX3);
+                else
+                    write_c0_vpeopt(read_c0_vpeopt() & ~VPEOPT_DWX3);
+            }
+        }
+    }
+
+#endif /* CONFIG_IFX_VPE_CACHE_SPLIT */
+
     probe_pcache();
     setup_scache();
 
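Note on usage (not part of the patch itself): the split is controlled entirely
by kernel command line parameters, parsed by the __setup() handlers added to
arch/mips/mm/c-r4k.c. r4k_cache_init() applies the resulting way masks to VPE0
at boot, and the hunk in arch/mips/kernel/vpe.c programs the complementary
masks into VPE1 when the second VPE is brought up. As a sketch, appending the
following to the board's existing kernel command line (wrapped here with a
backslash purely for readability) would split both caches two ways each:

    vpe_icache_shared=0 icache_way0=1 icache_way1=1 \
    vpe_dcache_shared=0 dcache_way0=1 dcache_way1=1

Following the semantics described in the Kconfig help above, ways 0 and 1 of
each cache are then reserved for VPE0, while ways 2 and 3 (left at their
default of 0) remain available to VPE1, so neither VPE is left without an
icache way.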
