
/*
 * arch/ubicom32/mach-common/cachectl.c
 * Architecture cache control support
 *
 * (C) Copyright 2009, Ubicom, Inc.
 *
 * This file is part of the Ubicom32 Linux Kernel Port.
 *
 * The Ubicom32 Linux Kernel Port is free software: you can redistribute
 * it and/or modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, either version 2 of the
 * License, or (at your option) any later version.
 *
 * The Ubicom32 Linux Kernel Port is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with the Ubicom32 Linux Kernel Port. If not,
 * see <http://www.gnu.org/licenses/>.
 *
 * Ubicom32 implementation derived from (with many thanks):
 *   arch/m68knommu
 *   arch/blackfin
 *   arch/parisc
 */
28
29#include <linux/types.h>
30#include <linux/module.h>
31#include <asm/cachectl.h>
32
33/*
34 * The write queue flush procedure in mem_cache_control needs to make
35 * DCACHE_WRITE_QUEUE_LENGTH writes to DDR (not OCM). Here we reserve some
36 * memory for this operation.
37 * Allocate array of cache lines of least DCACHE_WRITE_QUEUE_LENGTH + 1 words in
38 * length rounded up to the nearest cache line.
39 */
40#define CACHE_WRITE_QUEUE_FLUSH_AREA_SIZE \
41    ALIGN(sizeof(int) * (DCACHE_WRITE_QUEUE_LENGTH + 1), CACHE_LINE_SIZE)
42
43static char cache_write_queue_flush_area[CACHE_WRITE_QUEUE_FLUSH_AREA_SIZE]
44    __attribute__((aligned(CACHE_LINE_SIZE)));
45
/*
 * ONE_CCR_ADDR_OP()
 *	Execute a single address-based cache control register (CCR) operation.
 *
 * Sequence (per the asm below):
 *	1. Spin until the CCR's VALID bit is clear (controller free).
 *	2. Write the target address into CCR_ADDR and the low byte of 'op'
 *	   into the command byte of CCR_CTRL.
 *	3. Set VALID to start the operation, wait 2 cycles for it to latch.
 *	4. Spin until the DONE bit is set.
 *
 * 'cc' must be a CCR base address ("a" register constraint); only the low
 * 8 bits of 'op' are used.
 */
#define ONE_CCR_ADDR_OP(cc, op_addr, op) \
    do { \
        asm volatile ( \
        " btst "D(CCR_CTRL)"(%0), #"D(CCR_CTRL_VALID)" \n\t" \
        " jmpne.f .-4 \n\t" \
        " move.4 "D(CCR_ADDR)"(%0), %1 \n\t" \
        " move.1 "D(CCR_CTRL+3)"(%0), %2 \n\t" \
        " bset "D(CCR_CTRL)"(%0), "D(CCR_CTRL)"(%0), #"D(CCR_CTRL_VALID)" \n\t" \
        " cycles 2 \n\t" \
        " btst "D(CCR_CTRL)"(%0), #"D(CCR_CTRL_DONE)" \n\t" \
        " jmpeq.f .-4 \n\t" \
            : \
            : "a"(cc), "r"(op_addr), "r"(op & 0xff) \
            : "cc" \
        ); \
    } while (0)
65
66/*
67 * mem_cache_control()
68 * Special cache control operation
69 */
70void mem_cache_control(unsigned long cc, unsigned long begin_addr,
71               unsigned long end_addr, unsigned long op)
72{
73    unsigned long op_addr;
74    int dccr = cc == DCCR_BASE;
75    if (dccr && op == CCR_CTRL_FLUSH_ADDR) {
76        /*
77         * We ensure all previous writes have left the data cache write
78         * queue by sending DCACHE_WRITE_QUEUE_LENGTH writes (to
79         * different words) down the queue. If this is not done it's
80         * possible that the data we are trying to flush hasn't even
81         * entered the data cache.
82         * The +1 ensure that the final 'flush' is actually a flush.
83         */
84        int *flush_area = (int *)cache_write_queue_flush_area;
85        asm volatile(
86            " .rept "D(DCACHE_WRITE_QUEUE_LENGTH + 1)" \n\t"
87            " move.4 (%0)4++, d0 \n\t"
88            " .endr \n\t"
89            : "+a"(flush_area)
90            );
91    }
92
93    if (dccr)
94        UBICOM32_LOCK(DCCR_LOCK_BIT);
95    else
96        UBICOM32_LOCK(ICCR_LOCK_BIT);
97
98    /*
99     * Calculate the cache lines we need to operate on that include
100     * begin_addr though end_addr.
101     */
102    begin_addr = begin_addr & ~(CACHE_LINE_SIZE - 1);
103    end_addr = (end_addr + CACHE_LINE_SIZE - 1) & ~(CACHE_LINE_SIZE - 1);
104    op_addr = begin_addr;
105
106    do {
107        ONE_CCR_ADDR_OP(cc, op_addr, op);
108        op_addr += CACHE_LINE_SIZE;
109    } while (likely(op_addr < end_addr));
110
111    if (dccr && op == CCR_CTRL_FLUSH_ADDR) {
112        /*
113         * It turns out that when flushing the data cache the last flush
114         * isn't actually complete at this point. This is because there
115         * is another write buffer on the DDR side of the cache that is
116         * arbitrated with the I-Cache.
117         *
118         * The only foolproof method that ensures that the last data
119         * cache flush *actually* completed is to do another flush on a
120         * dirty cache line. This flush will block until the DDR write
121         * buffer is empty.
122         *
123         * Rather than creating a another dirty cache line, we use the
124         * flush_area above as we know that it is dirty from previous
125         * writes.
126         */
127        ONE_CCR_ADDR_OP(cc, cache_write_queue_flush_area, op);
128    }
129
130    if (dccr)
131        UBICOM32_UNLOCK(DCCR_LOCK_BIT);
132    else
133        UBICOM32_UNLOCK(ICCR_LOCK_BIT);
134
135}
136EXPORT_SYMBOL(mem_cache_control);
137
