cache.c 6.7 KB
Newer Older
A
ArcherChang 已提交
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273
#include "nds32.h"
#include "cache.h"
#include "string.h"

/*
 * Invalidate the entire L1 D-cache without writing dirty lines back,
 * then drain pending stores and wait for the operation to complete.
 * Compiles to a no-op when CONFIG_CPU_DCACHE_ENABLE is not set.
 */
void nds32_dcache_invalidate(void){

#ifdef CONFIG_CPU_DCACHE_ENABLE
	__nds32__cctl_l1d_invalall();	/* drop every L1 D-cache line */

	__nds32__msync_store();		/* drain the store/write buffer */
	__nds32__dsb();			/* data barrier: wait for completion */
#endif
}

/*
 * Write back (if write-back cache) and invalidate the entire L1 D-cache
 * by iterating over every way/set index.  Compiles to a no-op when
 * CONFIG_CPU_DCACHE_ENABLE is not set.
 *
 * Write-back configuration: interrupts are disabled around the index
 * walk so no line can be re-dirtied between its WB and INVAL steps.
 * Write-through configuration: no dirty data can exist, so a plain
 * invalidate pass suffices and interrupts stay enabled.
 */
void nds32_dcache_flush(void){

#ifdef CONFIG_CPU_DCACHE_ENABLE

#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	unsigned long saved_gie;
#endif
	unsigned long end;
	unsigned long cache_line;

	/* Total cache size = ways * sets * line size; 'end' walks down
	 * from one-past-the-last index to 0, one line per step. */
	cache_line = CACHE_LINE_SIZE(DCACHE);
	end = CACHE_WAY(DCACHE) * CACHE_SET(DCACHE) * cache_line;

#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	GIE_SAVE(&saved_gie);

	/*
	 * Use CCTL L1D_IX_WB/L1D_IX_INVAL subtype instead of combined
	 * L1D_IX_WBINVAL. Because only N903 supports L1D_IX_WBINVAL.
	 */
	do {
		end -= cache_line;
		__nds32__cctlidx_wbinval(NDS32_CCTL_L1D_IX_WB, end);
		__nds32__cctlidx_wbinval(NDS32_CCTL_L1D_IX_INVAL, end);
	} while (end > 0);
	GIE_RESTORE(saved_gie);
#else
	while (end > 0){
		end -= cache_line;
		__nds32__cctlidx_wbinval(NDS32_CCTL_L1D_IX_INVAL, end);
	}
#endif
	__nds32__msync_store();	/* drain the store/write buffer */
	__nds32__dsb();		/* data barrier: wait for completion */
#endif
}

/*
 * Invalidate the entire L1 I-cache by walking every way/set index,
 * then issue an instruction barrier so subsequent fetches see fresh
 * memory.  Compiles to a no-op when CONFIG_CPU_ICACHE_ENABLE is unset.
 */
void nds32_icache_flush(void){

#ifdef CONFIG_CPU_ICACHE_ENABLE
	unsigned long line = CACHE_LINE_SIZE(ICACHE);
	unsigned long index;

	/* Total index space = ways * sets * line size. */
	index = CACHE_WAY(ICACHE) * CACHE_SET(ICACHE) * CACHE_LINE_SIZE(ICACHE);

	/* Walk top-down, invalidating one line per step. */
	do {
		index -= line;
		__nds32__cctlidx_wbinval(NDS32_CCTL_L1I_IX_INVAL, index);
	} while (index > 0);

	__nds32__isb();
#endif
}

#ifdef CONFIG_CHECK_RANGE_ALIGNMENT
/*
 * Sanity-check a cache-maintenance range: both boundaries must be
 * cache-line aligned and the range must be non-empty.
 *
 * Fix: no trailing semicolon after while (0) — the caller supplies it,
 * so the macro now behaves as a single statement.  The old trailing
 * semicolon expanded to two statements and broke `if (x)
 * chk_range_alignment(...); else ...` constructs.
 */
#define chk_range_alignment(start, end, line_size) do {	\
							\
	BUG_ON((start) & ((line_size) - 1));		\
	BUG_ON((end) & ((line_size) - 1));		\
	BUG_ON((start) == (end));			\
							\
} while (0)
#else
/* Checks compiled out: expands to nothing. */
#define chk_range_alignment(start, end, line_size)
#endif
/* ================================ D-CACHE =============================== */
/*
 * nds32_dcache_clean_range(start, end)
 *
 * For the specified virtual address range, ensure that all caches contain
 * clean data, such that peripheral accesses to the physical RAM fetch
 * correct data.
 */
void nds32_dcache_clean_range(unsigned long start, unsigned long end){

#ifdef CONFIG_CPU_DCACHE_ENABLE
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH

	unsigned long line = CACHE_LINE_SIZE(DCACHE);

	chk_range_alignment(start, end, line);

	/* Write back each line in [start, end); the data stays cached. */
	for (; start < end; start += line)
		__nds32__cctlva_wbinval_one_lvl(NDS32_CCTL_L1D_VA_WB, (void *)start);

	__nds32__msync_store();	/* drain the store/write buffer */
	__nds32__dsb();		/* data barrier: wait for completion */
#endif
#endif
}

/*
 * Clean an arbitrary (possibly unaligned) virtual range for DMA:
 * widen the bounds outward to whole cache lines, then delegate to
 * nds32_dcache_clean_range().  Empty ranges are a no-op.
 */
void nds32_dma_clean_range(unsigned long start, unsigned long end){

	unsigned long mask = CACHE_LINE_SIZE(DCACHE) - 1;

	start &= ~mask;			/* round start down to a line */
	end = (end + mask) & ~mask;	/* round end up to a line */

	if (end != start)
		nds32_dcache_clean_range(start, end);
}

/*
 * nds32_dcache_invalidate_range(start, end)
 *
 * throw away all D-cached data in specified region without an obligation
 * to write them back. Note however that we must clean the D-cached entries
 * around the boundaries if the start and/or end address are not cache
 * aligned.
 */
void nds32_dcache_invalidate_range(unsigned long start, unsigned long end){

#ifdef CONFIG_CPU_DCACHE_ENABLE

	unsigned long line = CACHE_LINE_SIZE(DCACHE);
	unsigned long va;

	chk_range_alignment(start, end, line);

	/* Drop each line in [start, end) without writing dirty data back. */
	for (va = start; va < end; va += line)
		__nds32__cctlva_wbinval_one_lvl(NDS32_CCTL_L1D_VA_INVAL, (void *)va);
#endif
}

/*
 * Flush (write back, then invalidate) every D-cache line covering
 * [start, end).  On a write-through cache no dirty data can exist,
 * so the write-back step is compiled out and only the invalidate
 * remains.  Expects line-aligned bounds.
 */
void nds32_dcache_flush_range(unsigned long start, unsigned long end){

#ifdef CONFIG_CPU_DCACHE_ENABLE
	unsigned long line = CACHE_LINE_SIZE(DCACHE);

	for (; start < end; start += line){
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
		/* Push dirty data to memory before dropping the line. */
		__nds32__cctlva_wbinval_one_lvl(NDS32_CCTL_L1D_VA_WB, (void *)start);
#endif
		__nds32__cctlva_wbinval_one_lvl(NDS32_CCTL_L1D_VA_INVAL, (void *)start);
	}
#endif
}

/*
 * Write back every D-cache line covering [start, end); lines remain
 * valid in the cache.  A no-op on write-through or disabled D-cache
 * configurations, where memory is already up to date.
 */
void nds32_dcache_writeback_range(unsigned long start, unsigned long end){

#ifdef CONFIG_CPU_DCACHE_ENABLE
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	unsigned long line = CACHE_LINE_SIZE(DCACHE);
	unsigned long va;

	for (va = start; va < end; va += line)
		__nds32__cctlva_wbinval_one_lvl(NDS32_CCTL_L1D_VA_WB, (void *)va);
#endif
#endif
}
/*
 * Byte-wise copy of len bytes from src to dst.  Used by the DMA
 * helpers to save/restore data in partially-owned cache lines; the
 * regions are assumed not to overlap.
 *
 * Fix: the loop index is now unsigned long to match len.  The old
 * 'int i' compared signed against unsigned (promoting i) and could
 * not represent counts above INT_MAX; the redundant re-casts of the
 * already unsigned char* parameters are also gone.
 */
void unaligned_cache_line_move(unsigned char* src, unsigned char* dst, unsigned long len )
{
	unsigned long i;

	for (i = 0; i < len; ++i)
		dst[i] = src[i];
}



/*
 * Invalidate the D-cache over an arbitrary (possibly unaligned) DMA
 * range without losing unrelated data that shares the boundary cache
 * lines.  Because CCTL invalidation works on whole lines, the bytes
 * between the rounded-down start and the caller's start (and between
 * the caller's end and the rounded-up end) would be discarded too; we
 * save them to stack buffers first and write them back afterwards.
 *
 * NOTE(review): the restored head/tail bytes pass through the (now
 * clean) cache again; callers are presumably expected to invalidate
 * before the DMA transfer writes memory — confirm against callers.
 */
void nds32_dma_inv_range(unsigned long start, unsigned long end){
	unsigned long line_size;
	unsigned long old_start=start;	/* caller's original bounds */
	unsigned long old_end=end;
	line_size = CACHE_LINE_SIZE(DCACHE);
	/* VLA scratch buffers, one cache line each, for boundary bytes. */
	unsigned char h_buf[line_size];
	unsigned char t_buf[line_size];
	memset((void*)h_buf,0,line_size);
	memset((void*)t_buf,0,line_size);

	/* Round the range outward to whole cache lines. */
	start = start & (~(line_size-1));
	end = (end + line_size -1) & (~(line_size-1));
	if (start == end)
		return;
	if (start != old_start)
	{
		/* Head is unaligned: save [start, old_start) before inval. */
		//nds32_dcache_flush_range(start, start + line_size);
		unaligned_cache_line_move((unsigned char*)start, h_buf, old_start - start);
	}
	if (end != old_end)
	{
		/* Tail is unaligned: save [old_end, end) before inval. */
		//nds32_dcache_flush_range(end - line_size ,end);
		unaligned_cache_line_move((unsigned char*)old_end, t_buf, end - old_end);

	}
	nds32_dcache_invalidate_range(start, end);

	//handle cache line unaligned problem
	if(start != old_start)
		unaligned_cache_line_move(h_buf,(unsigned char*)start, old_start - start);

	if( end != old_end )
		unaligned_cache_line_move(t_buf,(unsigned char*)old_end, end - old_end);

}


/*
 * Flush an arbitrary (possibly unaligned) virtual range for DMA:
 * widen the bounds outward to whole cache lines, then delegate to
 * nds32_dcache_flush_range().  Empty ranges are a no-op.
 */
void nds32_dma_flush_range(unsigned long start, unsigned long end){

	unsigned long mask = CACHE_LINE_SIZE(DCACHE) - 1;

	start &= ~mask;			/* round start down to a line */
	end = (end + mask) & ~mask;	/* round end up to a line */

	if (end != start)
		nds32_dcache_flush_range(start, end);
}

/* ================================ I-CACHE =============================== */
/*
 * nds32_icache_invalidate_range(start, end)
 *
 * invalidate a range of virtual addresses from the Icache
 *
 * This is a little misleading, it is not intended to clean out
 * the i-cache but to make sure that any data written to the
 * range is made consistent. This means that when we execute code
 * in that region, everything works as we expect.
 *
 * This generally means writing back data in the Dcache and
 * write buffer and flushing the Icache over that region
 *
 * start: virtual start address
 * end: virtual end address
 */
void nds32_icache_invalidate_range(unsigned long start, unsigned long end){

#ifdef CONFIG_CPU_ICACHE_ENABLE

	unsigned long line = CACHE_LINE_SIZE(ICACHE);
	unsigned long mask = line - 1;

	/* Round the range outward to whole cache lines. */
	start &= ~mask;
	end = ( end + line - 1 )&(~mask);

	/* A degenerate (empty after rounding) range still invalidates
	 * one line, preserving the original behaviour. */
	if (end == start)
		end += line;

	/* Invalidate from the top of the range downward. */
	while (end > start){

		end -= line;
		__nds32__cctlva_wbinval_one_lvl(NDS32_CCTL_L1I_VA_INVAL, (void *)end);
	}
#endif
}