csky-for-linus-5.4-rc1: arch/csky patches for 5.4-rc1

This round of the csky subsystem is just some fixups.
 
 Fixup:
  - Fixup mb() synchronization problem
  - Fixup dma_alloc_coherent with PAGE_SO attribute
  - Fixup cache_op failed when cross memory ZONEs
  - Optimize arch_sync_dma_for_cpu/device with dma_inv_range
  - Fixup ioremap function losing
  - Fixup arch_get_unmapped_area() implementation
  - Fixup defer cache flush for 610
  - Support kernel non-aligned access
  - Fixup 610 vipt cache flush mechanism
  - Fixup add zero_fp fixup perf backtrace panic
  - Move static keyword to the front of declaration
  - Fixup csky_pmu.max_period assignment
  - Use generic free_initrd_mem()
  - entry: Remove unneeded need_resched() loop
 
 CI-Tested: https://gitlab.com/c-sky/buildroot/pipelines/77689888
 -----BEGIN PGP SIGNATURE-----
 
 iQJGBAABCAAwFiEE2KAv+isbWR/viAKHAXH1GYaIxXsFAl2Rfc0SHHJlbl9ndW9A
 Yy1za3kuY29tAAoJEAFx9RmGiMV7eEUQAIuuO1ym/o7zMWZsIRbISJYi6xuq752W
 RVhnPv957ktFYlqrtelDuZwkCPnK03YDTv8lPzcIhes+xYtuspN6NMum7SUWPt4c
 IbfQe4GmMwPGNrJ76NO9YcWQig8flT75wWE2CzOjhGpBfEkEHmtbOvOiZujcq4yN
 vhj8TKF9CjOSaDJNCOXinC6DeGoftRYTSRq6NMPfFfHIWqFcZIyb1Fz8tE/vFP4Y
 qEoG/ys/eu114DIZVnxr5ihmrJ4gjxmlXlnYA8WMRnRt6/oHEmmbtHLTT2eHbI2T
 u4TCssqrLoNADV7g7zyAyBnrgf4dXnJivGjc2NzVLAkgNbEJn+oGNYn+kdiRGd5X
 1CNVbzbZN0gHwLayDjJ4BWNxtpxxqmOjlDffjLVRW7dleTdfHvcxJwfhlvbQOPuq
 nMj4t1qJswn44f/bsj+F13hgXV0tWctYui/HS78xscE7t4yMNduf3JAm7TIxT0XN
 ej3102ffm4ycKZtfdwKwcACmMBJc+2QvGYmQo2L9pVLYIQe3QUWYs881V50PJXxV
 jdz4kmhpfKLie+yxmsdN1/8nTlcHF5wDiYwW9UColzfz1fCRkoPRx5tu/YI9zLoi
 K41CFEv4Z+aVzcp4HZNmOvVhGWgdmuwICMdY16wawhN6SAkvKJBFaP4g24h0J95M
 QyAPQl/VUMED
 =FLYg
 -----END PGP SIGNATURE-----

Merge tag 'csky-for-linus-5.4-rc1' of git://github.com/c-sky/csky-linux

Pull csky updates from Guo Ren:
 "This round of csky subsystem just some fixups:

   - Fix mb() synchronization problem

   - Fix dma_alloc_coherent with PAGE_SO attribute

   - Fix cache_op failed when cross memory ZONEs

   - Optimize arch_sync_dma_for_cpu/device with dma_inv_range

   - Fix ioremap function losing

   - Fix arch_get_unmapped_area() implementation

   - Fix defer cache flush for 610

   - Support kernel non-aligned access

   - Fix 610 vipt cache flush mechanism

   - Fix add zero_fp fixup perf backtrace panic

   - Move static keyword to the front of declaration

   - Fix csky_pmu.max_period assignment

   - Use generic free_initrd_mem()

   - entry: Remove unneeded need_resched() loop"

* tag 'csky-for-linus-5.4-rc1' of git://github.com/c-sky/csky-linux:
  csky: Move static keyword to the front of declaration
  csky: entry: Remove unneeded need_resched() loop
  csky: Fixup csky_pmu.max_period assignment
  csky: Fixup add zero_fp fixup perf backtrace panic
  csky: Use generic free_initrd_mem()
  csky: Fixup 610 vipt cache flush mechanism
  csky: Support kernel non-aligned access
  csky: Fixup defer cache flush for 610
  csky: Fixup arch_get_unmapped_area() implementation
  csky: Fixup ioremap function losing
  csky: Optimize arch_sync_dma_for_cpu/device with dma_inv_range
  csky/dma: Fixup cache_op failed when cross memory ZONEs
  csky: Fixup dma_alloc_coherent with PAGE_SO attribute
  csky: Fixup mb() synchronization problem
Commit 80b29b6b8c, merged by Linus Torvalds, 2019-09-30 10:16:17 -07:00
17 changed files with 292 additions and 213 deletions

View File

@ -5,8 +5,10 @@
#include <linux/uaccess.h>
#include <linux/ptrace.h>
static int align_enable = 1;
static int align_count;
static int align_kern_enable = 1;
static int align_usr_enable = 1;
static int align_kern_count = 0;
static int align_usr_count = 0;
static inline uint32_t get_ptreg(struct pt_regs *regs, uint32_t rx)
{
@ -32,9 +34,6 @@ static int ldb_asm(uint32_t addr, uint32_t *valp)
uint32_t val;
int err;
if (!access_ok((void *)addr, 1))
return 1;
asm volatile (
"movi %0, 0\n"
"1:\n"
@ -67,9 +66,6 @@ static int stb_asm(uint32_t addr, uint32_t val)
{
int err;
if (!access_ok((void *)addr, 1))
return 1;
asm volatile (
"movi %0, 0\n"
"1:\n"
@ -203,8 +199,6 @@ static int stw_c(struct pt_regs *regs, uint32_t rz, uint32_t addr)
if (stb_asm(addr, byte3))
return 1;
align_count++;
return 0;
}
@ -226,7 +220,14 @@ void csky_alignment(struct pt_regs *regs)
uint32_t addr = 0;
if (!user_mode(regs))
goto kernel_area;
if (!align_usr_enable) {
pr_err("%s user disabled.\n", __func__);
goto bad_area;
}
align_usr_count++;
ret = get_user(tmp, (uint16_t *)instruction_pointer(regs));
if (ret) {
@ -234,6 +235,19 @@ void csky_alignment(struct pt_regs *regs)
goto bad_area;
}
goto good_area;
kernel_area:
if (!align_kern_enable) {
pr_err("%s kernel disabled.\n", __func__);
goto bad_area;
}
align_kern_count++;
tmp = *(uint16_t *)instruction_pointer(regs);
good_area:
opcode = (uint32_t)tmp;
rx = opcode & 0xf;
@ -286,18 +300,32 @@ bad_area:
force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)addr);
}
static struct ctl_table alignment_tbl[4] = {
static struct ctl_table alignment_tbl[5] = {
{
.procname = "enable",
.data = &align_enable,
.maxlen = sizeof(align_enable),
.procname = "kernel_enable",
.data = &align_kern_enable,
.maxlen = sizeof(align_kern_enable),
.mode = 0666,
.proc_handler = &proc_dointvec
},
{
.procname = "count",
.data = &align_count,
.maxlen = sizeof(align_count),
.procname = "user_enable",
.data = &align_usr_enable,
.maxlen = sizeof(align_usr_enable),
.mode = 0666,
.proc_handler = &proc_dointvec
},
{
.procname = "kernel_count",
.data = &align_kern_count,
.maxlen = sizeof(align_kern_count),
.mode = 0666,
.proc_handler = &proc_dointvec
},
{
.procname = "user_count",
.data = &align_usr_count,
.maxlen = sizeof(align_usr_count),
.mode = 0666,
.proc_handler = &proc_dointvec
},
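
The reworked handler above splits the old single enable/count pair into separate kernel and user knobs. Assuming the table is still registered under the existing csky/alignment sysctl directory (the registration code is outside this hunk, so the exact path is an assumption), the new counters can be read from userspace with a sketch like this:

/*
 * Sketch only: the /proc/sys/csky/alignment/ path is assumed, not
 * shown in the hunk above.  Reads the new split fixup counters.
 */
#include <stdio.h>

static long read_knob(const char *path)
{
	FILE *f = fopen(path, "r");
	long val = -1;

	if (!f)
		return -1;
	if (fscanf(f, "%ld", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}

int main(void)
{
	printf("kernel fixups: %ld\n",
	       read_knob("/proc/sys/csky/alignment/kernel_count"));
	printf("user fixups:   %ld\n",
	       read_knob("/proc/sys/csky/alignment/user_count"));
	return 0;
}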

View File

@ -11,42 +11,66 @@
#include <asm/cacheflush.h>
#include <asm/cachectl.h>
#define PG_dcache_clean PG_arch_1
void flush_dcache_page(struct page *page)
{
struct address_space *mapping = page_mapping(page);
unsigned long addr;
struct address_space *mapping;
if (mapping && !mapping_mapped(mapping)) {
set_bit(PG_arch_1, &(page)->flags);
if (page == ZERO_PAGE(0))
return;
}
/*
* We could delay the flush for the !page_mapping case too. But that
 * case is for exec env/arg pages and those are 99% certainly going to
* get faulted into the tlb (and thus flushed) anyways.
*/
addr = (unsigned long) page_address(page);
dcache_wb_range(addr, addr + PAGE_SIZE);
}
mapping = page_mapping_file(page);
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
pte_t *pte)
if (mapping && !page_mapcount(page))
clear_bit(PG_dcache_clean, &page->flags);
else {
dcache_wbinv_all();
if (mapping)
icache_inv_all();
set_bit(PG_dcache_clean, &page->flags);
}
}
EXPORT_SYMBOL(flush_dcache_page);
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
pte_t *ptep)
{
unsigned long addr;
unsigned long pfn = pte_pfn(*ptep);
struct page *page;
unsigned long pfn;
pfn = pte_pfn(*pte);
if (unlikely(!pfn_valid(pfn)))
if (!pfn_valid(pfn))
return;
page = pfn_to_page(pfn);
addr = (unsigned long) page_address(page);
if (page == ZERO_PAGE(0))
return;
if (vma->vm_flags & VM_EXEC ||
pages_do_alias(addr, address & PAGE_MASK))
cache_wbinv_all();
if (!test_and_set_bit(PG_dcache_clean, &page->flags))
dcache_wbinv_all();
clear_bit(PG_arch_1, &(page)->flags);
if (page_mapping_file(page)) {
if (vma->vm_flags & VM_EXEC)
icache_inv_all();
}
}
void flush_kernel_dcache_page(struct page *page)
{
struct address_space *mapping;
mapping = page_mapping_file(page);
if (!mapping || mapping_mapped(mapping))
dcache_wbinv_all();
}
EXPORT_SYMBOL(flush_kernel_dcache_page);
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
dcache_wbinv_all();
if (vma->vm_flags & VM_EXEC)
icache_inv_all();
}
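
Condensed, the two reworked functions read as follows after this change (a sketch reconstructed from the added lines above; PG_dcache_clean aliases PG_arch_1 as defined at the top of the file):

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	if (page == ZERO_PAGE(0))
		return;

	mapping = page_mapping_file(page);

	if (mapping && !page_mapcount(page))
		clear_bit(PG_dcache_clean, &page->flags);	/* defer: no user mapping yet */
	else {
		dcache_wbinv_all();
		if (mapping)
			icache_inv_all();
		set_bit(PG_dcache_clean, &page->flags);
	}
}

void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
		      pte_t *ptep)
{
	unsigned long pfn = pte_pfn(*ptep);
	struct page *page;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	if (page == ZERO_PAGE(0))
		return;

	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		dcache_wbinv_all();		/* the deferred flush happens here */

	if (page_mapping_file(page)) {
		if (vma->vm_flags & VM_EXEC)
			icache_inv_all();
	}
}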

View File

@ -4,46 +4,63 @@
#ifndef __ABI_CSKY_CACHEFLUSH_H
#define __ABI_CSKY_CACHEFLUSH_H
#include <linux/compiler.h>
#include <linux/mm.h>
#include <asm/string.h>
#include <asm/cache.h>
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);
#define flush_cache_mm(mm) cache_wbinv_all()
#define flush_cache_mm(mm) dcache_wbinv_all()
#define flush_cache_page(vma, page, pfn) cache_wbinv_all()
#define flush_cache_dup_mm(mm) cache_wbinv_all()
#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
extern void flush_kernel_dcache_page(struct page *);
#define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages)
#define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages)
static inline void flush_kernel_vmap_range(void *addr, int size)
{
dcache_wbinv_all();
}
static inline void invalidate_kernel_vmap_range(void *addr, int size)
{
dcache_wbinv_all();
}
#define ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma,
struct page *page, unsigned long vmaddr)
{
if (PageAnon(page))
cache_wbinv_all();
}
/*
* if (current_mm != vma->mm) cache_wbinv_range(start, end) will be broken.
* Use cache_wbinv_all() here and need to be improved in future.
*/
#define flush_cache_range(vma, start, end) cache_wbinv_all()
#define flush_cache_vmap(start, end) cache_wbinv_range(start, end)
#define flush_cache_vunmap(start, end) cache_wbinv_range(start, end)
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
#define flush_cache_vmap(start, end) cache_wbinv_all()
#define flush_cache_vunmap(start, end) cache_wbinv_all()
#define flush_icache_page(vma, page) cache_wbinv_all()
#define flush_icache_page(vma, page) do {} while (0);
#define flush_icache_range(start, end) cache_wbinv_range(start, end)
#define flush_icache_user_range(vma, pg, adr, len) \
cache_wbinv_range(adr, adr + len)
#define flush_icache_user_range(vma,page,addr,len) \
flush_dcache_page(page)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
do { \
cache_wbinv_all(); \
memcpy(dst, src, len); \
cache_wbinv_all(); \
} while (0)
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
cache_wbinv_all(); \
memcpy(dst, src, len); \
cache_wbinv_all(); \
} while (0)
#define flush_dcache_mmap_lock(mapping) do {} while (0)
#define flush_dcache_mmap_unlock(mapping) do {} while (0)
#endif /* __ABI_CSKY_CACHEFLUSH_H */

View File

@ -1,13 +1,14 @@
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
extern unsigned long shm_align_mask;
#include <asm/shmparam.h>
extern void flush_dcache_page(struct page *page);
static inline unsigned long pages_do_alias(unsigned long addr1,
unsigned long addr2)
{
return (addr1 ^ addr2) & shm_align_mask;
return (addr1 ^ addr2) & (SHMLBA-1);
}
static inline void clear_user_page(void *addr, unsigned long vaddr,

View File

@ -9,58 +9,63 @@
#include <linux/random.h>
#include <linux/io.h>
unsigned long shm_align_mask = (0x4000 >> 1) - 1; /* Sane caches */
#define COLOUR_ALIGN(addr,pgoff) \
((((addr) + shm_align_mask) & ~shm_align_mask) + \
(((pgoff) << PAGE_SHIFT) & shm_align_mask))
((((addr)+SHMLBA-1)&~(SHMLBA-1)) + \
(((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
/*
* We need to ensure that shared mappings are correctly aligned to
* avoid aliasing issues with VIPT caches. We need to ensure that
* a specific page of an object is always mapped at a multiple of
* SHMLBA bytes.
*
* We unconditionally provide this function for all cases.
*/
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags)
{
struct vm_area_struct *vmm;
int do_color_align;
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
int do_align = 0;
struct vm_unmapped_area_info info;
if (flags & MAP_FIXED) {
/*
* We do not accept a shared mapping if it would violate
* cache aliasing constraints.
* We only need to do colour alignment if either the I or D
* caches alias.
*/
if ((flags & MAP_SHARED) &&
((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
do_align = filp || (flags & MAP_SHARED);
/*
* We enforce the MAP_FIXED case.
*/
if (flags & MAP_FIXED) {
if (flags & MAP_SHARED &&
(addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
return -EINVAL;
return addr;
}
if (len > TASK_SIZE)
return -ENOMEM;
do_color_align = 0;
if (filp || (flags & MAP_SHARED))
do_color_align = 1;
if (addr) {
if (do_color_align)
addr = COLOUR_ALIGN(addr, pgoff);
else
addr = PAGE_ALIGN(addr);
vmm = find_vma(current->mm, addr);
if (TASK_SIZE - len >= addr &&
(!vmm || addr + len <= vmm->vm_start))
return addr;
}
addr = TASK_UNMAPPED_BASE;
if (do_color_align)
if (do_align)
addr = COLOUR_ALIGN(addr, pgoff);
else
addr = PAGE_ALIGN(addr);
for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
/* At this point: (!vmm || addr < vmm->vm_end). */
if (TASK_SIZE - len < addr)
return -ENOMEM;
if (!vmm || addr + len <= vmm->vm_start)
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
(!vma || addr + len <= vm_start_gap(vma)))
return addr;
addr = vmm->vm_end;
if (do_color_align)
addr = COLOUR_ALIGN(addr, pgoff);
}
info.flags = 0;
info.length = len;
info.low_limit = mm->mmap_base;
info.high_limit = TASK_SIZE;
info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
info.align_offset = pgoff << PAGE_SHIFT;
return vm_unmapped_area(&info);
}
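
The colour-alignment arithmetic above keeps every page of a shared object at a consistent offset within an SHMLBA-sized window, so VIPT aliases land in the same cache colour. A standalone sketch of the calculation (SHMLBA is taken as 0x4000 here purely for illustration; the real value comes from <asm/shmparam.h>):

#include <stdio.h>

#define PAGE_SHIFT 12
#define SHMLBA     0x4000UL	/* assumed value, for illustration only */

static unsigned long colour_align(unsigned long addr, unsigned long pgoff)
{
	/* Mirrors the COLOUR_ALIGN() macro above. */
	return ((addr + SHMLBA - 1) & ~(SHMLBA - 1)) +
	       ((pgoff << PAGE_SHIFT) & (SHMLBA - 1));
}

int main(void)
{
	/* A mapping at page offset 1 is pushed to the next SHMLBA
	 * boundary plus one page, so all mappings of that page alias
	 * into the same cache colour. */
	printf("0x%lx\n", colour_align(0x10001000UL, 1));	/* prints 0x10005000 */
	return 0;
}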

View File

@ -9,11 +9,12 @@
#define nop() asm volatile ("nop\n":::"memory")
/*
* sync: completion barrier
* sync.s: completion barrier and shareable to other cores
* sync.i: completion barrier with flush cpu pipeline
* sync.is: completion barrier with flush cpu pipeline and shareable to
* other cores
 * sync: completion barrier; all sync.xx instructions
 * guarantee that the last bus response for the ld/st
 * instructions issued before the sync has been received
 * sync.s: inherits from sync, but is also shareable to other cores
 * sync.i: inherits from sync, but also flushes the cpu pipeline
 * sync.is: the same as sync.i + sync.s
*
* bar.brwarw: ordering barrier for all load/store instructions before it
* bar.brwarws: ordering barrier for all load/store instructions before it
@ -27,9 +28,7 @@
*/
#ifdef CONFIG_CPU_HAS_CACHEV2
#define mb() asm volatile ("bar.brwarw\n":::"memory")
#define rmb() asm volatile ("bar.brar\n":::"memory")
#define wmb() asm volatile ("bar.bwaw\n":::"memory")
#define mb() asm volatile ("sync.s\n":::"memory")
#ifdef CONFIG_SMP
#define __smp_mb() asm volatile ("bar.brwarws\n":::"memory")
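
A hypothetical driver fragment (names and descriptor layout invented for illustration, not part of this series) shows the kind of ordering the sync.s-based mb()/wmb() above have to provide: the descriptor payload must be globally visible before the device is told about it.

/* Fill a descriptor in coherent memory, then kick the device. */
static void post_buffer(u32 __iomem *doorbell, __le32 *desc,
			u32 addr, u32 len)
{
	desc[0] = cpu_to_le32(addr);
	desc[1] = cpu_to_le32(len);
	wmb();				/* publish payload before the ownership flip */
	desc[2] = cpu_to_le32(1);	/* hand the descriptor to the device */

	writel(1, doorbell);		/* writel() in asm/io.h already brackets the store with wmb()/mb() */
}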

View File

@ -24,6 +24,7 @@ void cache_wbinv_range(unsigned long start, unsigned long end);
void cache_wbinv_all(void);
void dma_wbinv_range(unsigned long start, unsigned long end);
void dma_inv_range(unsigned long start, unsigned long end);
void dma_wb_range(unsigned long start, unsigned long end);
#endif

View File

@ -4,17 +4,10 @@
#ifndef __ASM_CSKY_IO_H
#define __ASM_CSKY_IO_H
#include <abi/pgtable-bits.h>
#include <asm/pgtable.h>
#include <linux/types.h>
#include <linux/version.h>
extern void __iomem *ioremap(phys_addr_t offset, size_t size);
extern void iounmap(void *addr);
extern int remap_area_pages(unsigned long address, phys_addr_t phys_addr,
size_t size, unsigned long flags);
/*
* I/O memory access primitives. Reads are ordered relative to any
* following Normal memory access. Writes are ordered relative to any prior
@ -40,9 +33,17 @@ extern int remap_area_pages(unsigned long address, phys_addr_t phys_addr,
#define writel(v,c) ({ wmb(); writel_relaxed((v),(c)); mb(); })
#endif
#define ioremap_nocache(phy, sz) ioremap(phy, sz)
#define ioremap_wc ioremap_nocache
#define ioremap_wt ioremap_nocache
/*
* I/O memory mapping functions.
*/
extern void __iomem *ioremap_cache(phys_addr_t addr, size_t size);
extern void __iomem *__ioremap(phys_addr_t addr, size_t size, pgprot_t prot);
extern void iounmap(void *addr);
#define ioremap(addr, size) __ioremap((addr), (size), pgprot_noncached(PAGE_KERNEL))
#define ioremap_wc(addr, size) __ioremap((addr), (size), pgprot_writecombine(PAGE_KERNEL))
#define ioremap_nocache(addr, size) ioremap((addr), (size))
#define ioremap_cache ioremap_cache
#include <asm-generic/io.h>
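
A minimal sketch of the new mapping interface from a caller's point of view (the helper name and the register read are made up; only the ioremap()/iounmap() usage reflects this header):

static int foo_read_id(phys_addr_t base, size_t size, u32 *id)
{
	/* Default ioremap(): uncached + strongly ordered (_PAGE_SO).
	 * A frame buffer would use ioremap_wc() instead: uncached,
	 * write-combining, without _PAGE_SO. */
	void __iomem *regs = ioremap(base, size);

	if (!regs)
		return -ENOMEM;

	*id = readl(regs);	/* readl()/writel() ordering comes from this header */
	iounmap(regs);

	return 0;
}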

View File

@ -258,6 +258,16 @@ static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
unsigned long prot = pgprot_val(_prot);
prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED | _PAGE_SO;
return __pgprot(prot);
}
#define pgprot_writecombine pgprot_writecombine
static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
{
unsigned long prot = pgprot_val(_prot);
prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;
return __pgprot(prot);

View File

@ -17,6 +17,12 @@
#define PTE_INDX_SHIFT 10
#define _PGDIR_SHIFT 22
.macro zero_fp
#ifdef CONFIG_STACKTRACE
movi r8, 0
#endif
.endm
.macro tlbop_begin name, val0, val1, val2
ENTRY(csky_\name)
mtcr a3, ss2
@ -96,6 +102,7 @@ ENTRY(csky_\name)
SAVE_ALL 0
.endm
.macro tlbop_end is_write
zero_fp
RD_MEH a2
psrset ee, ie
mov a0, sp
@ -120,6 +127,7 @@ tlbop_end 1
ENTRY(csky_systemcall)
SAVE_ALL TRAP0_SIZE
zero_fp
psrset ee, ie
@ -136,9 +144,9 @@ ENTRY(csky_systemcall)
mov r9, sp
bmaski r10, THREAD_SHIFT
andn r9, r10
ldw r8, (r9, TINFO_FLAGS)
ANDI_R3 r8, (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_AUDIT)
cmpnei r8, 0
ldw r12, (r9, TINFO_FLAGS)
ANDI_R3 r12, (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_AUDIT)
cmpnei r12, 0
bt csky_syscall_trace
#if defined(__CSKYABIV2__)
subi sp, 8
@ -180,7 +188,7 @@ csky_syscall_trace:
ENTRY(ret_from_kernel_thread)
jbsr schedule_tail
mov a0, r8
mov a0, r10
jsr r9
jbsr ret_from_exception
@ -189,9 +197,9 @@ ENTRY(ret_from_fork)
mov r9, sp
bmaski r10, THREAD_SHIFT
andn r9, r10
ldw r8, (r9, TINFO_FLAGS)
ANDI_R3 r8, (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_AUDIT)
cmpnei r8, 0
ldw r12, (r9, TINFO_FLAGS)
ANDI_R3 r12, (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_AUDIT)
cmpnei r12, 0
bf ret_from_exception
mov a0, sp /* sp = pt_regs pointer */
jbsr syscall_trace_exit
@ -209,9 +217,9 @@ ret_from_exception:
bmaski r10, THREAD_SHIFT
andn r9, r10
ldw r8, (r9, TINFO_FLAGS)
andi r8, (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED)
cmpnei r8, 0
ldw r12, (r9, TINFO_FLAGS)
andi r12, (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED)
cmpnei r12, 0
bt exit_work
1:
RESTORE_ALL
@ -220,11 +228,11 @@ exit_work:
lrw syscallid, ret_from_exception
mov lr, syscallid
btsti r8, TIF_NEED_RESCHED
btsti r12, TIF_NEED_RESCHED
bt work_resched
mov a0, sp
mov a1, r8
mov a1, r12
jmpi do_notify_resume
work_resched:
@ -232,6 +240,7 @@ work_resched:
ENTRY(csky_trap)
SAVE_ALL 0
zero_fp
psrset ee
mov a0, sp /* Push Stack pointer arg */
jbsr trap_c /* Call C-level trap handler */
@ -265,6 +274,7 @@ ENTRY(csky_get_tls)
ENTRY(csky_irq)
SAVE_ALL 0
zero_fp
psrset ee
#ifdef CONFIG_PREEMPT
@ -276,27 +286,23 @@ ENTRY(csky_irq)
* Get task_struct->stack.preempt_count for current,
* and increase it by 1.
*/
ldw r8, (r9, TINFO_PREEMPT)
addi r8, 1
stw r8, (r9, TINFO_PREEMPT)
ldw r12, (r9, TINFO_PREEMPT)
addi r12, 1
stw r12, (r9, TINFO_PREEMPT)
#endif
mov a0, sp
jbsr csky_do_IRQ
#ifdef CONFIG_PREEMPT
subi r8, 1
stw r8, (r9, TINFO_PREEMPT)
cmpnei r8, 0
subi r12, 1
stw r12, (r9, TINFO_PREEMPT)
cmpnei r12, 0
bt 2f
ldw r8, (r9, TINFO_FLAGS)
btsti r8, TIF_NEED_RESCHED
ldw r12, (r9, TINFO_FLAGS)
btsti r12, TIF_NEED_RESCHED
bf 2f
1:
jbsr preempt_schedule_irq /* irq en/disable is done inside */
ldw r7, (r9, TINFO_FLAGS) /* get new tasks TI_FLAGS */
btsti r7, TIF_NEED_RESCHED
bt 1b /* go again */
#endif
2:
jmpi ret_from_exception

View File

@ -1306,7 +1306,7 @@ int csky_pmu_device_probe(struct platform_device *pdev,
&csky_pmu.count_width)) {
csky_pmu.count_width = DEFAULT_COUNT_WIDTH;
}
csky_pmu.max_period = BIT(csky_pmu.count_width) - 1;
csky_pmu.max_period = BIT_ULL(csky_pmu.count_width) - 1;
csky_pmu.plat_device = pdev;
@ -1337,7 +1337,7 @@ int csky_pmu_device_probe(struct platform_device *pdev,
return ret;
}
const static struct of_device_id csky_pmu_of_device_ids[] = {
static const struct of_device_id csky_pmu_of_device_ids[] = {
{.compatible = "csky,csky-pmu"},
{},
};
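
csky_pmu.max_period is a 64-bit value, while BIT() shifts an unsigned long, which is 32 bits wide on csky, so the old expression overflowed once the counter width reached 32. A standalone illustration of what the BIT_ULL() form computes (the 48-bit width is a made-up example, not necessarily the hardware default):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int count_width = 48;	/* hypothetical counter width */

	/* What BIT_ULL(count_width) - 1 expands to: the shift is always
	 * done in 64 bits.  (1UL << 48) would be undefined behaviour
	 * with a 32-bit unsigned long. */
	uint64_t max_period = ((uint64_t)1 << count_width) - 1;

	printf("max_period = 0x%llx\n", (unsigned long long)max_period);
	return 0;
}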

View File

@ -55,7 +55,7 @@ int copy_thread(unsigned long clone_flags,
if (unlikely(p->flags & PF_KTHREAD)) {
memset(childregs, 0, sizeof(struct pt_regs));
childstack->r15 = (unsigned long) ret_from_kernel_thread;
childstack->r8 = kthread_arg;
childstack->r10 = kthread_arg;
childstack->r9 = usp;
childregs->sr = mfcr("psr");
} else {

View File

@ -120,7 +120,12 @@ void dma_wbinv_range(unsigned long start, unsigned long end)
cache_op_range(start, end, DATA_CACHE|CACHE_CLR|CACHE_INV, 1);
}
void dma_inv_range(unsigned long start, unsigned long end)
{
cache_op_range(start, end, DATA_CACHE|CACHE_CLR|CACHE_INV, 1);
}
void dma_wb_range(unsigned long start, unsigned long end)
{
cache_op_range(start, end, DATA_CACHE|CACHE_INV, 1);
cache_op_range(start, end, DATA_CACHE|CACHE_CLR|CACHE_INV, 1);
}

View File

@ -69,11 +69,20 @@ void dma_wbinv_range(unsigned long start, unsigned long end)
sync_is();
}
void dma_inv_range(unsigned long start, unsigned long end)
{
unsigned long i = start & ~(L1_CACHE_BYTES - 1);
for (; i < end; i += L1_CACHE_BYTES)
asm volatile("dcache.iva %0\n"::"r"(i):"memory");
sync_is();
}
void dma_wb_range(unsigned long start, unsigned long end)
{
unsigned long i = start & ~(L1_CACHE_BYTES - 1);
for (; i < end; i += L1_CACHE_BYTES)
asm volatile("dcache.civa %0\n"::"r"(i):"memory");
asm volatile("dcache.cva %0\n"::"r"(i):"memory");
sync_is();
}

View File

@ -14,69 +14,50 @@
#include <linux/version.h>
#include <asm/cache.h>
void arch_dma_prep_coherent(struct page *page, size_t size)
{
if (PageHighMem(page)) {
unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
do {
void *ptr = kmap_atomic(page);
size_t _size = (size < PAGE_SIZE) ? size : PAGE_SIZE;
memset(ptr, 0, _size);
dma_wbinv_range((unsigned long)ptr,
(unsigned long)ptr + _size);
kunmap_atomic(ptr);
page++;
size -= PAGE_SIZE;
count--;
} while (count);
} else {
void *ptr = page_address(page);
memset(ptr, 0, size);
dma_wbinv_range((unsigned long)ptr, (unsigned long)ptr + size);
}
}
static inline void cache_op(phys_addr_t paddr, size_t size,
void (*fn)(unsigned long start, unsigned long end))
{
struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
unsigned int offset = paddr & ~PAGE_MASK;
struct page *page = phys_to_page(paddr);
void *start = __va(page_to_phys(page));
unsigned long offset = offset_in_page(paddr);
size_t left = size;
unsigned long start;
do {
size_t len = left;
if (PageHighMem(page)) {
void *addr;
if (offset + len > PAGE_SIZE) {
if (offset >= PAGE_SIZE) {
page += offset >> PAGE_SHIFT;
offset &= ~PAGE_MASK;
}
if (offset + len > PAGE_SIZE)
len = PAGE_SIZE - offset;
}
addr = kmap_atomic(page);
start = (unsigned long)(addr + offset);
fn(start, start + len);
kunmap_atomic(addr);
if (PageHighMem(page)) {
start = kmap_atomic(page);
fn((unsigned long)start + offset,
(unsigned long)start + offset + len);
kunmap_atomic(start);
} else {
start = (unsigned long)phys_to_virt(paddr);
fn(start, start + size);
fn((unsigned long)start + offset,
(unsigned long)start + offset + len);
}
offset = 0;
page++;
start += PAGE_SIZE;
left -= len;
} while (left);
}
static void dma_wbinv_set_zero_range(unsigned long start, unsigned long end)
{
memset((void *)start, 0, end - start);
dma_wbinv_range(start, end);
}
void arch_dma_prep_coherent(struct page *page, size_t size)
{
cache_op(page_to_phys(page), size, dma_wbinv_set_zero_range);
}
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
size_t size, enum dma_data_direction dir)
{
@ -98,11 +79,10 @@ void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
{
switch (dir) {
case DMA_TO_DEVICE:
cache_op(paddr, size, dma_wb_range);
break;
return;
case DMA_FROM_DEVICE:
case DMA_BIDIRECTIONAL:
cache_op(paddr, size, dma_wbinv_range);
cache_op(paddr, size, dma_inv_range);
break;
default:
BUG();
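
Condensed from the added lines above, the CPU-side sync now maps DMA directions onto cache operations as follows: nothing for DMA_TO_DEVICE, and an invalidate-only pass (dma_inv_range instead of the old write-back + invalidate) for data coming from the device.

void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
			   size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		return;					/* device only read the buffer */
	case DMA_FROM_DEVICE:
	case DMA_BIDIRECTIONAL:
		cache_op(paddr, size, dma_inv_range);	/* invalidate only */
		break;
	default:
		BUG();
	}
}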

View File

@ -60,22 +60,6 @@ void __init mem_init(void)
mem_init_print_info(NULL);
}
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
if (start < end)
pr_info("Freeing initrd memory: %ldk freed\n",
(end - start) >> 10);
for (; start < end; start += PAGE_SIZE) {
ClearPageReserved(virt_to_page(start));
init_page_count(virt_to_page(start));
free_page(start);
totalram_pages_inc();
}
}
#endif
extern char __init_begin[], __init_end[];
void free_initmem(void)

View File

@ -8,12 +8,12 @@
#include <asm/pgtable.h>
void __iomem *ioremap(phys_addr_t addr, size_t size)
static void __iomem *__ioremap_caller(phys_addr_t addr, size_t size,
pgprot_t prot, void *caller)
{
phys_addr_t last_addr;
unsigned long offset, vaddr;
struct vm_struct *area;
pgprot_t prot;
last_addr = addr + size - 1;
if (!size || last_addr < addr)
@ -23,15 +23,12 @@ void __iomem *ioremap(phys_addr_t addr, size_t size)
addr &= PAGE_MASK;
size = PAGE_ALIGN(size + offset);
area = get_vm_area_caller(size, VM_ALLOC, __builtin_return_address(0));
area = get_vm_area_caller(size, VM_IOREMAP, caller);
if (!area)
return NULL;
vaddr = (unsigned long)area->addr;
prot = __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE |
_PAGE_GLOBAL | _CACHE_UNCACHED | _PAGE_SO);
if (ioremap_page_range(vaddr, vaddr + size, addr, prot)) {
free_vm_area(area);
return NULL;
@ -39,7 +36,20 @@ void __iomem *ioremap(phys_addr_t addr, size_t size)
return (void __iomem *)(vaddr + offset);
}
EXPORT_SYMBOL(ioremap);
void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot)
{
return __ioremap_caller(phys_addr, size, prot,
__builtin_return_address(0));
}
EXPORT_SYMBOL(__ioremap);
void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size)
{
return __ioremap_caller(phys_addr, size, PAGE_KERNEL,
__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);
void iounmap(void __iomem *addr)
{
@ -51,10 +61,9 @@ pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t vma_prot)
{
if (!pfn_valid(pfn)) {
vma_prot.pgprot |= _PAGE_SO;
return pgprot_noncached(vma_prot);
} else if (file->f_flags & O_SYNC) {
return pgprot_noncached(vma_prot);
return pgprot_writecombine(vma_prot);
}
return vma_prot;