Merge branch 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc
* 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc:
  Fix build of cpm_uart due to core changes
  powerpc/8xx: Fix regression introduced by cache coherency rewrite
  powerpc/4xx: Fix erroneous xmon warning on PowerPC 4xx
  powerpc/mm: Fix 40x and 8xx vs. _PAGE_SPECIAL
  powerpc: Cleanup linker script using new linker script macros.
  powerpc: Fix ibm,client-architecture-support printout
  powerpc: Increase NODES_SHIFT on 64bit from 4 to 8
  powerpc/perf_counter: Fix vdso detection
  powerpc: Move 64bit heap above 1TB on machines with 1TB segments
  powerpc: Change archdata dma_data to a union
  powerpc: Rename get_dma_direct_offset get_dma_offset
  powerpc/mm: Remove duplicated #include
  powerpc/book3e-64: Remove duplicated #include
  powerpc: Check for unsupported relocs when using CONFIG_RELOCATABLE
  powerpc/pmc: Don't access lppaca on Book3E
  powerpc: kmalloc failure ignored in vio_build_iommu_table()
  hvc_console: Provide (un)locked version for hvc_resize()
commit 8e44e43477
@@ -385,9 +385,15 @@ config NUMA
 
 config NODES_SHIFT
 	int
+	default "8" if PPC64
 	default "4"
 	depends on NEED_MULTIPLE_NODES
 
+config MAX_ACTIVE_REGIONS
+	int
+	default "256" if PPC64
+	default "32"
+
 config ARCH_SELECT_MEMORY_MODEL
 	def_bool y
 	depends on PPC64
@@ -164,6 +164,17 @@ PHONY += $(BOOT_TARGETS)
 
 boot := arch/$(ARCH)/boot
 
+ifeq ($(CONFIG_RELOCATABLE),y)
+quiet_cmd_relocs_check = CALL    $<
+      cmd_relocs_check = perl $< "$(OBJDUMP)" "$(obj)/vmlinux"
+
+PHONY += relocs_check
+relocs_check: arch/powerpc/relocs_check.pl vmlinux
+	$(call cmd,relocs_check)
+
+zImage: relocs_check
+endif
+
 $(BOOT_TARGETS): vmlinux
 	$(Q)$(MAKE) ARCH=ppc64 $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
 
@@ -15,7 +15,16 @@ struct dev_archdata {
 
 	/* DMA operations on that device */
 	struct dma_map_ops	*dma_ops;
-	void			*dma_data;
+
+	/*
+	 * When an iommu is in use, dma_data is used as a ptr to the base of the
+	 * iommu_table. Otherwise, it is a simple numerical offset.
+	 */
+	union {
+		dma_addr_t	dma_offset;
+		void		*iommu_table_base;
+	} dma_data;
+
 #ifdef CONFIG_SWIOTLB
 	dma_addr_t		max_direct_dma_addr;
 #endif
@@ -26,7 +26,6 @@ extern void *dma_direct_alloc_coherent(struct device *dev, size_t size,
 extern void dma_direct_free_coherent(struct device *dev, size_t size,
 				     void *vaddr, dma_addr_t dma_handle);
 
-extern unsigned long get_dma_direct_offset(struct device *dev);
 
 #ifdef CONFIG_NOT_COHERENT_CACHE
 /*
@@ -90,6 +89,28 @@ static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
 	dev->archdata.dma_ops = ops;
 }
 
+/*
+ * get_dma_offset()
+ *
+ * Get the dma offset on configurations where the dma address can be determined
+ * from the physical address by looking at a simple offset.  Direct dma and
+ * swiotlb use this function, but it is typically not used by implementations
+ * with an iommu.
+ */
+static inline dma_addr_t get_dma_offset(struct device *dev)
+{
+	if (dev)
+		return dev->archdata.dma_data.dma_offset;
+
+	return PCI_DRAM_OFFSET;
+}
+
+static inline void set_dma_offset(struct device *dev, dma_addr_t off)
+{
+	if (dev)
+		dev->archdata.dma_data.dma_offset = off;
+}
+
 /* this will be removed soon */
 #define flush_write_buffers()
 
@@ -181,12 +202,12 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 
 static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
 {
-	return paddr + get_dma_direct_offset(dev);
+	return paddr + get_dma_offset(dev);
 }
 
 static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
 {
-	return daddr - get_dma_direct_offset(dev);
+	return daddr - get_dma_offset(dev);
 }
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
@@ -70,6 +70,16 @@ struct iommu_table {
 
 struct scatterlist;
 
+static inline void set_iommu_table_base(struct device *dev, void *base)
+{
+	dev->archdata.dma_data.iommu_table_base = base;
+}
+
+static inline void *get_iommu_table_base(struct device *dev)
+{
+	return dev->archdata.dma_data.iommu_table_base;
+}
+
 /* Frees table for an individual device node */
 extern void iommu_free_table(struct iommu_table *tbl, const char *node_name);
 
@@ -29,7 +29,7 @@ int reserve_pmc_hardware(perf_irq_t new_perf_irq);
 void release_pmc_hardware(void);
 void ppc_enable_pmcs(void);
 
-#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_BOOK3S_64
 #include <asm/lppaca.h>
 
 static inline void ppc_set_pmu_inuse(int inuse)
@@ -43,6 +43,7 @@
 #define	_PAGE_NO_CACHE	0x004	/* I: caching is inhibited */
 #define	_PAGE_WRITETHRU	0x008	/* W: caching is write-through */
 #define	_PAGE_USER	0x010	/* matches one of the zone permission bits */
+#define	_PAGE_SPECIAL	0x020	/* software: Special page */
 #define	_PAGE_RW	0x040	/* software: Writes permitted */
 #define	_PAGE_DIRTY	0x080	/* software: dirty page */
 #define	_PAGE_HWWRITE	0x100	/* hardware: Dirty & RW, set in exception */
@@ -32,6 +32,7 @@
 #define _PAGE_FILE	0x0002	/* when !present: nonlinear file mapping */
 #define _PAGE_NO_CACHE	0x0002	/* I: cache inhibit */
 #define _PAGE_SHARED	0x0004	/* No ASID (context) compare */
+#define _PAGE_SPECIAL	0x0008	/* SW entry, forced to 0 by the TLB miss */
 
 /* These five software bits must be masked out when the entry is loaded
  * into the TLB.
@@ -25,9 +25,6 @@
 #ifndef _PAGE_WRITETHRU
 #define _PAGE_WRITETHRU	0
 #endif
-#ifndef _PAGE_SPECIAL
-#define _PAGE_SPECIAL	0
-#endif
 #ifndef _PAGE_4K_PFN
 #define _PAGE_4K_PFN		0
 #endif
@@ -179,7 +176,5 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);
 #define HAVE_PAGE_AGP
 
 /* Advertise support for _PAGE_SPECIAL */
-#ifdef _PAGE_SPECIAL
 #define __HAVE_ARCH_PTE_SPECIAL
-#endif
 
@@ -18,7 +18,7 @@
 static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
 				      dma_addr_t *dma_handle, gfp_t flag)
 {
-	return iommu_alloc_coherent(dev, dev->archdata.dma_data, size,
+	return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
 				    dma_handle, device_to_mask(dev), flag,
 				    dev_to_node(dev));
 }
@@ -26,7 +26,7 @@ static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
 static void dma_iommu_free_coherent(struct device *dev, size_t size,
 				    void *vaddr, dma_addr_t dma_handle)
 {
-	iommu_free_coherent(dev->archdata.dma_data, size, vaddr, dma_handle);
+	iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle);
 }
 
 /* Creates TCEs for a user provided buffer.  The user buffer must be
@@ -39,8 +39,8 @@ static dma_addr_t dma_iommu_map_page(struct device *dev, struct page *page,
 				     enum dma_data_direction direction,
 				     struct dma_attrs *attrs)
 {
-	return iommu_map_page(dev, dev->archdata.dma_data, page, offset, size,
-			      device_to_mask(dev), direction, attrs);
+	return iommu_map_page(dev, get_iommu_table_base(dev), page, offset,
+			      size, device_to_mask(dev), direction, attrs);
 }
 
 
@@ -48,7 +48,7 @@ static void dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
 				 size_t size, enum dma_data_direction direction,
 				 struct dma_attrs *attrs)
 {
-	iommu_unmap_page(dev->archdata.dma_data, dma_handle, size, direction,
+	iommu_unmap_page(get_iommu_table_base(dev), dma_handle, size, direction,
 			 attrs);
 }
 
@@ -57,7 +57,7 @@ static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
 			    int nelems, enum dma_data_direction direction,
 			    struct dma_attrs *attrs)
 {
-	return iommu_map_sg(dev, dev->archdata.dma_data, sglist, nelems,
+	return iommu_map_sg(dev, get_iommu_table_base(dev), sglist, nelems,
 			    device_to_mask(dev), direction, attrs);
 }
 
@@ -65,14 +65,14 @@ static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
 			       int nelems, enum dma_data_direction direction,
 			       struct dma_attrs *attrs)
 {
-	iommu_unmap_sg(dev->archdata.dma_data, sglist, nelems, direction,
+	iommu_unmap_sg(get_iommu_table_base(dev), sglist, nelems, direction,
 		       attrs);
 }
 
 /* We support DMA to/from any memory page via the iommu */
 static int dma_iommu_dma_supported(struct device *dev, u64 mask)
 {
-	struct iommu_table *tbl = dev->archdata.dma_data;
+	struct iommu_table *tbl = get_iommu_table_base(dev);
 
 	if (!tbl || tbl->it_offset > mask) {
 		printk(KERN_INFO
@@ -21,13 +21,6 @@
  * default the offset is PCI_DRAM_OFFSET.
  */
 
-unsigned long get_dma_direct_offset(struct device *dev)
-{
-	if (dev)
-		return (unsigned long)dev->archdata.dma_data;
-
-	return PCI_DRAM_OFFSET;
-}
 
 void *dma_direct_alloc_coherent(struct device *dev, size_t size,
 				dma_addr_t *dma_handle, gfp_t flag)
@@ -37,7 +30,7 @@ void *dma_direct_alloc_coherent(struct device *dev, size_t size,
 	ret = __dma_alloc_coherent(dev, size, dma_handle, flag);
 	if (ret == NULL)
 		return NULL;
-	*dma_handle += get_dma_direct_offset(dev);
+	*dma_handle += get_dma_offset(dev);
 	return ret;
 #else
 	struct page *page;
@@ -51,7 +44,7 @@ void *dma_direct_alloc_coherent(struct device *dev, size_t size,
 		return NULL;
 	ret = page_address(page);
 	memset(ret, 0, size);
-	*dma_handle = virt_to_abs(ret) + get_dma_direct_offset(dev);
+	*dma_handle = virt_to_abs(ret) + get_dma_offset(dev);
 
 	return ret;
 #endif
@@ -75,7 +68,7 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
 	int i;
 
 	for_each_sg(sgl, sg, nents, i) {
-		sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev);
+		sg->dma_address = sg_phys(sg) + get_dma_offset(dev);
 		sg->dma_length = sg->length;
 		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
 	}
@@ -110,7 +103,7 @@ static inline dma_addr_t dma_direct_map_page(struct device *dev,
 {
 	BUG_ON(dir == DMA_NONE);
 	__dma_sync_page(page, offset, size, dir);
-	return page_to_phys(page) + offset + get_dma_direct_offset(dev);
+	return page_to_phys(page) + offset + get_dma_offset(dev);
 }
 
 static inline void dma_direct_unmap_page(struct device *dev,
@@ -17,7 +17,6 @@
 #include <asm/cputable.h>
 #include <asm/setup.h>
 #include <asm/thread_info.h>
-#include <asm/reg.h>
 #include <asm/exception-64e.h>
 #include <asm/bug.h>
 #include <asm/irqflags.h>
@@ -1117,7 +1117,7 @@ void __devinit pcibios_setup_bus_devices(struct pci_bus *bus)
 
 		/* Hook up default DMA ops */
 		sd->dma_ops = pci_dma_ops;
-		sd->dma_data = (void *)PCI_DRAM_OFFSET;
+		set_dma_offset(&dev->dev, PCI_DRAM_OFFSET);
 
 		/* Additional platform DMA/iommu setup */
 		if (ppc_md.pci_dma_dev_setup)
@@ -1165,7 +1165,22 @@ static inline unsigned long brk_rnd(void)
 
 unsigned long arch_randomize_brk(struct mm_struct *mm)
 {
-	unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
+	unsigned long base = mm->brk;
+	unsigned long ret;
+
+#ifdef CONFIG_PPC64
+	/*
+	 * If we are using 1TB segments and we are allowed to randomise
+	 * the heap, we can put it above 1TB so it is backed by a 1TB
+	 * segment. Otherwise the heap will be in the bottom 1TB
+	 * which always uses 256MB segments and this may result in a
+	 * performance penalty.
+	 */
+	if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
+		base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
+#endif
+
+	ret = PAGE_ALIGN(base + brk_rnd());
 
 	if (ret < mm->brk)
 		return mm->brk;
@@ -800,7 +800,7 @@ static void __init prom_send_capabilities(void)
 	root = call_prom("open", 1, 1, ADDR("/"));
 	if (root != 0) {
 		/* try calling the ibm,client-architecture-support method */
-		prom_printf("Calling ibm,client-architecture...");
+		prom_printf("Calling ibm,client-architecture-support...");
 		if (call_prom_ret("call-method", 3, 2, &ret,
 				  ADDR("ibm,client-architecture-support"),
 				  root,
@@ -814,6 +814,7 @@ static void __init prom_send_capabilities(void)
 			return;
 		}
 		call_prom("close", 1, 0, root);
+		prom_printf(" not implemented\n");
 	}
 
 	/* no ibm,client-architecture-support call, try the old way */
@@ -240,6 +240,13 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 		goto fail_mmapsem;
 	}
 
+	/*
+	 * Put vDSO base into mm struct. We need to do this before calling
+	 * install_special_mapping or the perf counter mmap tracking code
+	 * will fail to recognise it as a vDSO (since arch_vma_name fails).
+	 */
+	current->mm->context.vdso_base = vdso_base;
+
 	/*
 	 * our vma flags don't have VM_WRITE so by default, the process isn't
 	 * allowed to write those pages.
@@ -260,11 +267,10 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 				     VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
 				     VM_ALWAYSDUMP,
 				     vdso_pagelist);
-	if (rc)
+	if (rc) {
+		current->mm->context.vdso_base = 0;
 		goto fail_mmapsem;
-
-	/* Put vDSO base into mm struct */
-	current->mm->context.vdso_base = vdso_base;
+	}
 
 	up_write(&mm->mmap_sem);
 	return 0;
@@ -1054,6 +1054,8 @@ static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
 		return NULL;
 
 	tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);
+	if (tbl == NULL)
+		return NULL;
 
 	of_parse_dma_window(dev->dev.archdata.of_node, dma_window,
 			    &tbl->it_index, &offset, &size);
@@ -1233,7 +1235,7 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node)
 		vio_cmo_set_dma_ops(viodev);
 	else
 		viodev->dev.archdata.dma_ops = &dma_iommu_ops;
-	viodev->dev.archdata.dma_data = vio_build_iommu_table(viodev);
+	set_iommu_table_base(&viodev->dev, vio_build_iommu_table(viodev));
 	set_dev_node(&viodev->dev, of_node_to_nid(of_node));
 
 	/* init generic 'struct device' fields: */
@@ -30,6 +30,8 @@
 #include <asm/tlbflush.h>
 #include <asm/tlb.h>
 
+#include "mmu_decl.h"
+
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
 #ifdef CONFIG_SMP
@@ -166,7 +168,7 @@ struct page * maybe_pte_to_page(pte_t pte)
  * support falls into the same category.
  */
 
-static pte_t set_pte_filter(pte_t pte)
+static pte_t set_pte_filter(pte_t pte, unsigned long addr)
 {
 	pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
 	if (pte_looks_normal(pte) && !(cpu_has_feature(CPU_FTR_COHERENT_ICACHE) ||
@@ -175,6 +177,17 @@ static pte_t set_pte_filter(pte_t pte)
 		if (!pg)
 			return pte;
 		if (!test_bit(PG_arch_1, &pg->flags)) {
+#ifdef CONFIG_8xx
+			/* On 8xx, cache control instructions (particularly
+			 * "dcbst" from flush_dcache_icache) fault as write
+			 * operation if there is an unpopulated TLB entry
+			 * for the address in question. To workaround that,
+			 * we invalidate the TLB here, thus avoiding dcbst
+			 * misbehaviour.
+			 */
+			/* 8xx doesn't care about PID, size or ind args */
+			_tlbil_va(addr, 0, 0, 0);
+#endif /* CONFIG_8xx */
 			flush_dcache_icache_page(pg);
 			set_bit(PG_arch_1, &pg->flags);
 		}
@@ -194,7 +207,7 @@ static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
  * as we don't have two bits to spare for _PAGE_EXEC and _PAGE_HWEXEC so
  * instead we "filter out" the exec permission for non clean pages.
  */
-static pte_t set_pte_filter(pte_t pte)
+static pte_t set_pte_filter(pte_t pte, unsigned long addr)
 {
 	struct page *pg;
 
@@ -276,7 +289,7 @@ void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
 	 * this context might not have been activated yet when this
 	 * is called.
 	 */
-	pte = set_pte_filter(pte);
+	pte = set_pte_filter(pte, addr);
 
 	/* Perform the setting of the PTE */
 	__set_pte_at(mm, addr, ptep, pte, 0);
@@ -18,7 +18,6 @@
 #include <asm/asm-offsets.h>
 #include <asm/cputable.h>
 #include <asm/pgtable.h>
-#include <asm/reg.h>
 #include <asm/exception-64e.h>
 #include <asm/ppc-opcode.h>
 
@@ -77,7 +77,7 @@ static void __init celleb_init_direct_mapping(void)
 static void celleb_dma_dev_setup(struct device *dev)
 {
 	dev->archdata.dma_ops = get_pci_dma_ops();
-	dev->archdata.dma_data = (void *)celleb_dma_direct_offset;
+	set_dma_offset(dev, celleb_dma_direct_offset);
 }
 
 static void celleb_pci_dma_dev_setup(struct pci_dev *pdev)
@@ -657,15 +657,13 @@ static void cell_dma_dev_setup_fixed(struct device *dev);
 
 static void cell_dma_dev_setup(struct device *dev)
 {
-	struct dev_archdata *archdata = &dev->archdata;
-
 	/* Order is important here, these are not mutually exclusive */
 	if (get_dma_ops(dev) == &dma_iommu_fixed_ops)
 		cell_dma_dev_setup_fixed(dev);
 	else if (get_pci_dma_ops() == &dma_iommu_ops)
-		archdata->dma_data = cell_get_iommu_table(dev);
+		set_iommu_table_base(dev, cell_get_iommu_table(dev));
 	else if (get_pci_dma_ops() == &dma_direct_ops)
-		archdata->dma_data = (void *)cell_dma_direct_offset;
+		set_dma_offset(dev, cell_dma_direct_offset);
 	else
 		BUG();
 }
@@ -973,11 +971,10 @@ static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask)
 
 static void cell_dma_dev_setup_fixed(struct device *dev)
 {
-	struct dev_archdata *archdata = &dev->archdata;
 	u64 addr;
 
 	addr = cell_iommu_get_fixed_address(dev) + dma_iommu_fixed_base;
-	archdata->dma_data = (void *)addr;
+	set_dma_offset(dev, addr);
 
 	dev_dbg(dev, "iommu: fixed addr = %llx\n", addr);
 }
@@ -193,7 +193,7 @@ static void pci_dma_dev_setup_iseries(struct pci_dev *pdev)
 		pdn->iommu_table = iommu_init_table(tbl, -1);
 	else
 		kfree(tbl);
-	pdev->dev.archdata.dma_data = pdn->iommu_table;
+	set_iommu_table_base(&pdev->dev, pdn->iommu_table);
 }
 #else
 #define pci_dma_dev_setup_iseries	NULL
@@ -189,7 +189,7 @@ static void pci_dma_dev_setup_pasemi(struct pci_dev *dev)
 	}
 #endif
 
-	dev->dev.archdata.dma_data = &iommu_table_iobmap;
+	set_iommu_table_base(&dev->dev, &iommu_table_iobmap);
 }
 
 static void pci_dma_bus_setup_null(struct pci_bus *b) { }
@@ -482,7 +482,7 @@ static void pci_dma_dev_setup_pSeries(struct pci_dev *dev)
 					   phb->node);
 		iommu_table_setparms(phb, dn, tbl);
 		PCI_DN(dn)->iommu_table = iommu_init_table(tbl, phb->node);
-		dev->dev.archdata.dma_data = PCI_DN(dn)->iommu_table;
+		set_iommu_table_base(&dev->dev, PCI_DN(dn)->iommu_table);
 		return;
 	}
 
@@ -494,7 +494,7 @@ static void pci_dma_dev_setup_pSeries(struct pci_dev *dev)
 		dn = dn->parent;
 
 	if (dn && PCI_DN(dn))
-		dev->dev.archdata.dma_data = PCI_DN(dn)->iommu_table;
+		set_iommu_table_base(&dev->dev, PCI_DN(dn)->iommu_table);
 	else
 		printk(KERN_WARNING "iommu: Device %s has no iommu table\n",
 		       pci_name(dev));
@@ -538,7 +538,7 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
 	 */
 	if (dma_window == NULL || pdn->parent == NULL) {
 		pr_debug("  no dma window for device, linking to parent\n");
-		dev->dev.archdata.dma_data = PCI_DN(pdn)->iommu_table;
+		set_iommu_table_base(&dev->dev, PCI_DN(pdn)->iommu_table);
 		return;
 	}
 
@@ -554,7 +554,7 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
 		pr_debug("  found DMA window, table: %p\n", pci->iommu_table);
 	}
 
-	dev->dev.archdata.dma_data = pci->iommu_table;
+	set_iommu_table_base(&dev->dev, pci->iommu_table);
 }
 #else  /* CONFIG_PCI */
 #define pci_dma_bus_setup_pSeries	NULL
@@ -0,0 +1,56 @@
+#!/usr/bin/perl
+
+# Copyright © 2009 IBM Corporation
+
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version
+# 2 of the License, or (at your option) any later version.
+
+# This script checks the relocations of a vmlinux for "suspicious"
+# relocations.
+
+use strict;
+use warnings;
+
+if ($#ARGV != 1) {
+	die "$0 [path to objdump] [path to vmlinux]\n";
+}
+
+# Have Kbuild supply the path to objdump so we handle cross compilation.
+my $objdump = shift;
+my $vmlinux = shift;
+my $bad_relocs_count = 0;
+my $bad_relocs = "";
+my $old_binutils = 0;
+
+open(FD, "$objdump -R $vmlinux|") or die;
+while (<FD>) {
+	study $_;
+
+	# Only look at relocation lines.
+	next if (!/\s+R_/);
+
+	# These relocations are okay
+	next if (/R_PPC64_RELATIVE/ or /R_PPC64_NONE/ or
+	         /R_PPC64_ADDR64\s+mach_/);
+
+	# If we see this type of relocation it's an indication that
+	# we /may/ be using an old version of binutils.
+	if (/R_PPC64_UADDR64/) {
+		$old_binutils++;
+	}
+
+	$bad_relocs_count++;
+	$bad_relocs .= $_;
+}
+
+if ($bad_relocs_count) {
+	print "WARNING: $bad_relocs_count bad relocations\n";
+	print $bad_relocs;
+}
+
+if ($old_binutils) {
+	print "WARNING: You need at least binutils >= 2.19 to build a ".
+	      "CONFIG_RELOCATABLE kernel\n";
+}
@@ -297,7 +297,7 @@ static void pci_dma_dev_setup_dart(struct pci_dev *dev)
 	/* We only have one iommu table on the mac for now, which makes
 	 * things simple. Setup all PCI devices to point to this table
 	 */
-	dev->dev.archdata.dma_data = &iommu_table_dart;
+	set_iommu_table_base(&dev->dev, &iommu_table_dart);
 }
 
 static void pci_dma_bus_setup_dart(struct pci_bus *bus)
@@ -335,6 +335,16 @@ int cpus_are_in_xmon(void)
 }
 #endif
 
+static inline int unrecoverable_excp(struct pt_regs *regs)
+{
+#ifdef CONFIG_4xx
+	/* We have no MSR_RI bit on 4xx, so we simply return false */
+	return 0;
+#else
+	return ((regs->msr & MSR_RI) == 0);
+#endif
+}
+
 static int xmon_core(struct pt_regs *regs, int fromipi)
 {
 	int cmd = 0;
@@ -388,7 +398,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
 	bp = NULL;
 	if ((regs->msr & (MSR_IR|MSR_PR|MSR_SF)) == (MSR_IR|MSR_SF))
 		bp = at_breakpoint(regs->nip);
-	if (bp || (regs->msr & MSR_RI) == 0)
+	if (bp || unrecoverable_excp(regs))
 		fromipi = 0;
 
 	if (!fromipi) {
@@ -399,7 +409,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
 			       cpu, BP_NUM(bp));
 			xmon_print_symbol(regs->nip, " ", ")\n");
 		}
-		if ((regs->msr & MSR_RI) == 0)
+		if (unrecoverable_excp(regs))
 			printf("WARNING: exception is not recoverable, "
 			       "can't continue\n");
 		release_output_lock();
@@ -490,7 +500,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
 		printf("Stopped at breakpoint %x (", BP_NUM(bp));
 		xmon_print_symbol(regs->nip, " ", ")\n");
 	}
-	if ((regs->msr & MSR_RI) == 0)
+	if (unrecoverable_excp(regs))
 		printf("WARNING: exception is not recoverable, "
 		       "can't continue\n");
 	remove_bpts();
@@ -678,7 +678,7 @@ int hvc_poll(struct hvc_struct *hp)
 EXPORT_SYMBOL_GPL(hvc_poll);
 
 /**
- * hvc_resize() - Update terminal window size information.
+ * __hvc_resize() - Update terminal window size information.
  * @hp:		HVC console pointer
  * @ws:		Terminal window size structure
  *
@@ -687,12 +687,12 @@ EXPORT_SYMBOL_GPL(hvc_poll);
 *
 * Locking:	Locking free; the function MUST be called holding hp->lock
 */
-void hvc_resize(struct hvc_struct *hp, struct winsize ws)
+void __hvc_resize(struct hvc_struct *hp, struct winsize ws)
 {
 	hp->ws = ws;
 	schedule_work(&hp->tty_resize);
 }
-EXPORT_SYMBOL_GPL(hvc_resize);
+EXPORT_SYMBOL_GPL(__hvc_resize);
 
 /*
 * This kthread is either polling or interrupt driven. This is determined by
@@ -28,6 +28,7 @@
 #define HVC_CONSOLE_H
 #include <linux/kref.h>
 #include <linux/tty.h>
+#include <linux/spinlock.h>
 
 /*
 * This is the max number of console adapters that can/will be found as
@@ -88,7 +89,16 @@ int hvc_poll(struct hvc_struct *hp);
 void hvc_kick(void);
 
 /* Resize hvc tty terminal window */
-extern void hvc_resize(struct hvc_struct *hp, struct winsize ws);
+extern void __hvc_resize(struct hvc_struct *hp, struct winsize ws);
+
+static inline void hvc_resize(struct hvc_struct *hp, struct winsize ws)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&hp->lock, flags);
+	__hvc_resize(hp, ws);
+	spin_unlock_irqrestore(&hp->lock, flags);
+}
 
 /* default notifier for irq based notification */
 extern int notifier_add_irq(struct hvc_struct *hp, int data);
@@ -273,7 +273,9 @@ static int hvc_iucv_write(struct hvc_iucv_private *priv,
 	case MSG_TYPE_WINSIZE:
 		if (rb->mbuf->datalen != sizeof(struct winsize))
 			break;
-		hvc_resize(priv->hvc, *((struct winsize *) rb->mbuf->data));
+		/* The caller must ensure that the hvc is locked, which
+		 * is the case when called from hvc_iucv_get_chars() */
+		__hvc_resize(priv->hvc, *((struct winsize *) rb->mbuf->data));
 		break;
 
 	case MSG_TYPE_ERROR:	/* ignored ... */
@@ -649,7 +649,7 @@ static int cpm_uart_tx_pump(struct uart_port *port)
 	u8 *p;
 	int count;
 	struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;
-	struct circ_buf *xmit = &port->info->xmit;
+	struct circ_buf *xmit = &port->state->xmit;
 
 	/* Handle xon/xoff */
 	if (port->x_char) {