sh: Use L1_CACHE_BYTES for .data.cacheline_aligned.

Previously this was using a hardcoded 32; use L1_CACHE_BYTES for
cacheline alignment instead.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
commit 87e29cacb7
parent 5c36e6578d
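The point of the change is that the linker script should follow the configured cache line size rather than assume 32 bytes. As a minimal sketch of where that value comes from (the shift of 5 below is purely illustrative and is not part of this commit), the alignment macros reduce to:

    /* Illustrative only: the line size is configured once as a shift,
     * and everything else, including the linker script, derives from it. */
    #define L1_CACHE_SHIFT	5			/* hypothetical: 2^5 = 32-byte lines */
    #define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)
    #define L1_CACHE_ALIGN(x)	(((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))

With the linker script keyed on L1_CACHE_BYTES, a configuration with a larger cache line no longer ends up with an under-aligned section, and one with a smaller line stops wasting padding.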
--- a/arch/sh/kernel/vmlinux.lds.S
+++ b/arch/sh/kernel/vmlinux.lds.S
@@ -3,6 +3,7 @@
  * Written by Niibe Yutaka
  */
 #include <asm/thread_info.h>
+#include <asm/cache.h>
 #include <asm-generic/vmlinux.lds.h>
 
 #ifdef CONFIG_CPU_LITTLE_ENDIAN
@@ -53,7 +54,7 @@ SECTIONS
   . = ALIGN(PAGE_SIZE);
   .data.page_aligned : { *(.data.page_aligned) }
 
-  . = ALIGN(32);
+  . = ALIGN(L1_CACHE_BYTES);
   __per_cpu_start = .;
   .data.percpu : { *(.data.percpu) }
   __per_cpu_end = .;
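For context, the hunk above aligns the start of the per-cpu data area; the .data.cacheline_aligned section named in the commit title is populated the same way, by annotated C objects. A hedged sketch of the kind of declarations that land in these sections (the variable names here are made up for illustration):

    #include <linux/cache.h>
    #include <linux/percpu.h>

    /* DEFINE_PER_CPU() places the object in .data.percpu, whose start the
     * linker script above now rounds up to L1_CACHE_BYTES. */
    static DEFINE_PER_CPU(unsigned long, example_counter);

    /* __cacheline_aligned (from <linux/cache.h>) aligns the object to the
     * cache line size and, in kernels of this era, places it in
     * .data.cacheline_aligned. */
    static struct example_stats {
    	unsigned long hits;
    	unsigned long misses;
    } example_stats __cacheline_aligned;

Starting these sections on a real cache-line boundary keeps the first object from sharing a line with whatever the linker happened to place before it, which is the whole point of the annotations.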
--- a/include/asm-sh/cache.h
+++ b/include/asm-sh/cache.h
@@ -21,6 +21,7 @@
 
 #define L1_CACHE_ALIGN(x)	(((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
 
+#ifndef __ASSEMBLY__
 struct cache_info {
 	unsigned int ways;		/* Number of cache ways */
 	unsigned int sets;		/* Number of cache sets */
@@ -47,6 +48,6 @@ struct cache_info {
 
 	unsigned long flags;
 };
-
+#endif /* __ASSEMBLY__ */
 #endif /* __KERNEL__ */
 #endif /* __ASM_SH_CACHE_H */
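The new __ASSEMBLY__ guard is what makes the #include <asm/cache.h> in the linker script safe: vmlinux.lds.S is only run through the C preprocessor (with __ASSEMBLY__ defined), so any unguarded C declarations would be pasted into the linker script as-is and break the link. A rough sketch of the resulting header shape, heavily abridged:

    #ifndef __ASM_SH_CACHE_H
    #define __ASM_SH_CACHE_H

    /* Preprocessor-only definitions stay outside the guard so the linker
     * script and assembly files can use them. */
    #define L1_CACHE_ALIGN(x)	(((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))

    #ifndef __ASSEMBLY__
    /* C-only declarations are hidden from the preprocessed linker script. */
    struct cache_info {
    	/* ... fields elided ... */
    };
    #endif /* __ASSEMBLY__ */

    #endif /* __ASM_SH_CACHE_H */

Only the #defines survive when the header is pulled into vmlinux.lds.S; the compiler proper still sees struct cache_info exactly as before.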