Merge branch 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6
* 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6:
  [S390] Poison init section before freeing it.
  [S390] Use add_active_range() and free_area_init_nodes().
  [S390] Virtual memmap for s390.
  [S390] Update documentation for dynamic subchannel mapping.
  [S390] Use dev->groups for adding/removing the subchannel attribute group.
  [S390] Support for disconnected devices reappearing on another subchannel.
  [S390] subchannel lock conversion.
  [S390] Some preparations for the dynamic subchannel mapping patch.
  [S390] runtime switch for qdio performance statistics
  [S390] New DASD feature for ERP related logging
  [S390] add reset call handler to the ap bus.
  [S390] more workqueue fixes.
  [S390] workqueue fixes.
  [S390] uaccess_pt: add missing down_read() and convert to is_init().
commit 88032b322a
@@ -18,11 +18,18 @@ devices/
	- 0.0.0002/
	- 0.1.0000/0.1.1234/
	...
	- defunct/

In this example, device 0815 is accessed via subchannel 0 in subchannel set 0,
device 4711 via subchannel 1 in subchannel set 0, and subchannel 2 is a non-I/O
subchannel. Device 1234 is accessed via subchannel 0 in subchannel set 1.

The subchannel named 'defunct' does not represent any real subchannel on the
system; it is a pseudo subchannel where disconnected ccw devices are moved to
if they are displaced by another ccw device becoming operational on their
former subchannel. The ccw devices will be moved again to a proper subchannel
if they become operational again on that subchannel.

You should address a ccw device via its bus id (e.g. 0.0.4711); the device can
be found under bus/ccw/devices/.

@@ -241,8 +241,14 @@ config WARN_STACK_SIZE
	  This allows you to specify the maximum frame size a function may
	  have without the compiler complaining about it.

config ARCH_POPULATES_NODE_MAP
	def_bool y

source "mm/Kconfig"

config HOLES_IN_ZONE
	def_bool y

comment "I/O subsystem configuration"

config MACHCHK_WARNING

@@ -266,14 +272,6 @@ config QDIO

	  If unsure, say Y.

config QDIO_PERF_STATS
	bool "Performance statistics in /proc"
	depends on QDIO
	help
	  Say Y here to get performance statistics in /proc/qdio_perf

	  If unsure, say N.

config QDIO_DEBUG
	bool "Extended debugging information"
	depends on QDIO

@@ -134,7 +134,6 @@ CONFIG_RESOURCES_64BIT=y
#
CONFIG_MACHCHK_WARNING=y
CONFIG_QDIO=y
# CONFIG_QDIO_PERF_STATS is not set
# CONFIG_QDIO_DEBUG is not set

#

@@ -64,9 +64,8 @@ unsigned int console_devno = -1;
unsigned int console_irq = -1;
unsigned long machine_flags = 0;

struct mem_chunk memory_chunk[MEMORY_CHUNKS];
struct mem_chunk __initdata memory_chunk[MEMORY_CHUNKS];
volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */
unsigned long __initdata zholes_size[MAX_NR_ZONES];
static unsigned long __initdata memory_end;

/*

@@ -354,21 +353,6 @@ void machine_power_off(void)
 */
void (*pm_power_off)(void) = machine_power_off;

static void __init
add_memory_hole(unsigned long start, unsigned long end)
{
	unsigned long dma_pfn = MAX_DMA_ADDRESS >> PAGE_SHIFT;

	if (end <= dma_pfn)
		zholes_size[ZONE_DMA] += end - start + 1;
	else if (start > dma_pfn)
		zholes_size[ZONE_NORMAL] += end - start + 1;
	else {
		zholes_size[ZONE_DMA] += dma_pfn - start + 1;
		zholes_size[ZONE_NORMAL] += end - dma_pfn;
	}
}

static int __init early_parse_mem(char *p)
{
	memory_end = memparse(p, &p);

@@ -521,7 +505,6 @@ setup_memory(void)
{
	unsigned long bootmap_size;
	unsigned long start_pfn, end_pfn, init_pfn;
	unsigned long last_rw_end;
	int i;

	/*

@@ -577,39 +560,27 @@ setup_memory(void)
	/*
	 * Register RAM areas with the bootmem allocator.
	 */
	last_rw_end = start_pfn;

	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
		unsigned long start_chunk, end_chunk;
		unsigned long start_chunk, end_chunk, pfn;

		if (memory_chunk[i].type != CHUNK_READ_WRITE)
			continue;
		start_chunk = (memory_chunk[i].addr + PAGE_SIZE - 1);
		start_chunk >>= PAGE_SHIFT;
		end_chunk = (memory_chunk[i].addr + memory_chunk[i].size);
		end_chunk >>= PAGE_SHIFT;
		if (start_chunk < start_pfn)
			start_chunk = start_pfn;
		if (end_chunk > end_pfn)
			end_chunk = end_pfn;
		if (start_chunk < end_chunk) {
			/* Initialize storage key for RAM pages */
			for (init_pfn = start_chunk ; init_pfn < end_chunk;
			     init_pfn++)
				page_set_storage_key(init_pfn << PAGE_SHIFT,
						     PAGE_DEFAULT_KEY);
			free_bootmem(start_chunk << PAGE_SHIFT,
				     (end_chunk - start_chunk) << PAGE_SHIFT);
			if (last_rw_end < start_chunk)
				add_memory_hole(last_rw_end, start_chunk - 1);
			last_rw_end = end_chunk;
		}
		start_chunk = PFN_DOWN(memory_chunk[i].addr);
		end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size) - 1;
		end_chunk = min(end_chunk, end_pfn);
		if (start_chunk >= end_chunk)
			continue;
		add_active_range(0, start_chunk, end_chunk);
		pfn = max(start_chunk, start_pfn);
		for (; pfn <= end_chunk; pfn++)
			page_set_storage_key(PFN_PHYS(pfn), PAGE_DEFAULT_KEY);
	}

	psw_set_key(PAGE_DEFAULT_KEY);

	if (last_rw_end < end_pfn - 1)
		add_memory_hole(last_rw_end, end_pfn - 1);
	free_bootmem_with_active_regions(0, max_pfn);
	reserve_bootmem(0, PFN_PHYS(start_pfn));

	/*
	 * Reserve the bootmem bitmap itself as well. We do this in two

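The setup_memory() hunk above drops the hand-maintained zholes_size[] bookkeeping in favour of the generic active-range interface that free_area_init_nodes() consumes. A minimal sketch of how an architecture drives that 2.6.19-era API — the two-chunk table and pfn values here are made up for illustration:

#include <linux/mm.h>
#include <linux/bootmem.h>

/* Hypothetical stand-in for the s390 memory_chunk[] table. */
static struct { unsigned long start_pfn, end_pfn; } chunks[] = {
	{ 0x0, 0x10000 }, { 0x20000, 0x30000 },	/* hole in between */
};

static void __init register_ram_sketch(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0 };
	int i;

	/* 1. Report which pfn ranges really contain RAM (node 0). */
	for (i = 0; i < ARRAY_SIZE(chunks); i++)
		add_active_range(0, chunks[i].start_pfn, chunks[i].end_pfn);

	/* 2. Release exactly those ranges to the bootmem allocator. */
	free_bootmem_with_active_regions(0, 0x30000);

	/* 3. Later, paging_init() sizes the zones from the same data;
	 *    holes no longer need manual zholes accounting. */
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
	max_zone_pfns[ZONE_NORMAL] = 0x30000;
	free_area_init_nodes(max_zone_pfns);
}
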
@@ -8,8 +8,8 @@
 */

#include <linux/errno.h>
#include <asm/uaccess.h>
#include <linux/mm.h>
#include <asm/uaccess.h>
#include <asm/futex.h>

static inline int __handle_fault(struct mm_struct *mm, unsigned long address,

@@ -60,8 +60,9 @@ out:

out_of_memory:
	up_read(&mm->mmap_sem);
	if (current->pid == 1) {
	if (is_init(current)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk("VM: killing process %s\n", current->comm);

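The second hunk swaps the literal pid test for the is_init() helper and adds the down_read() that has to pair with the earlier up_read() before the code jumps back into the fault path. At this point in kernel history the helper is just a named wrapper — roughly the following, simplified from the 2.6.19-era <linux/sched.h> (treat the body as an approximation; later kernels replace it with a namespace-aware is_global_init()):

#include <linux/sched.h>

/* Approximate restatement of the helper the hunk switches to; having
 * one named predicate gives the pid-namespace work a single place to
 * change instead of scattered 'pid == 1' comparisons. */
static inline int is_init_sketch(struct task_struct *tsk)
{
	return tsk->pid == 1;
}
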
@@ -2,6 +2,6 @@
# Makefile for the linux s390-specific parts of the memory manager.
#

obj-y	 := init.o fault.o ioremap.o extmem.o mmap.o
obj-y	 := init.o fault.o ioremap.o extmem.o mmap.o vmem.o
obj-$(CONFIG_CMM) += cmm.o

@@ -16,6 +16,7 @@
#include <linux/bootmem.h>
#include <linux/ctype.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/ebcdic.h>
#include <asm/errno.h>
#include <asm/extmem.h>

@@ -237,65 +238,6 @@ query_segment_type (struct dcss_segment *seg)
	return rc;
}

/*
 * check if the given segment collides with guest storage.
 * returns 1 if this is the case, 0 if no collision was found
 */
static int
segment_overlaps_storage(struct dcss_segment *seg)
{
	int i;

	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
		if (memory_chunk[i].type != CHUNK_READ_WRITE)
			continue;
		if ((memory_chunk[i].addr >> 20) > (seg->end >> 20))
			continue;
		if (((memory_chunk[i].addr + memory_chunk[i].size - 1) >> 20)
		    < (seg->start_addr >> 20))
			continue;
		return 1;
	}
	return 0;
}

/*
 * check if segment collides with other segments that are currently loaded
 * returns 1 if this is the case, 0 if no collision was found
 */
static int
segment_overlaps_others (struct dcss_segment *seg)
{
	struct list_head *l;
	struct dcss_segment *tmp;

	BUG_ON(!mutex_is_locked(&dcss_lock));
	list_for_each(l, &dcss_list) {
		tmp = list_entry(l, struct dcss_segment, list);
		if ((tmp->start_addr >> 20) > (seg->end >> 20))
			continue;
		if ((tmp->end >> 20) < (seg->start_addr >> 20))
			continue;
		if (seg == tmp)
			continue;
		return 1;
	}
	return 0;
}

/*
 * check if segment exceeds the kernel mapping range (detected or set via mem=)
 * returns 1 if this is the case, 0 if segment fits into the range
 */
static inline int
segment_exceeds_range (struct dcss_segment *seg)
{
	int seg_last_pfn = (seg->end) >> PAGE_SHIFT;
	if (seg_last_pfn > max_pfn)
		return 1;
	return 0;
}

/*
 * get info about a segment
 * possible return values:

@@ -341,24 +283,26 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
	rc = query_segment_type (seg);
	if (rc < 0)
		goto out_free;
	if (segment_exceeds_range(seg)) {
		PRINT_WARN ("segment_load: not loading segment %s - exceeds"
			    " kernel mapping range\n",name);
		rc = -ERANGE;
		goto out_free;
	}
	if (segment_overlaps_storage(seg)) {
		PRINT_WARN ("segment_load: not loading segment %s - overlaps"
			    " storage\n",name);
		rc = -ENOSPC;
		goto out_free;
	}
	if (segment_overlaps_others(seg)) {
		PRINT_WARN ("segment_load: not loading segment %s - overlaps"
			    " other segments\n",name);
		rc = -EBUSY;

	rc = add_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1);

	switch (rc) {
	case 0:
		break;
	case -ENOSPC:
		PRINT_WARN("segment_load: not loading segment %s - overlaps "
			   "storage/segment\n", name);
		goto out_free;
	case -ERANGE:
		PRINT_WARN("segment_load: not loading segment %s - exceeds "
			   "kernel mapping range\n", name);
		goto out_free;
	default:
		PRINT_WARN("segment_load: not loading segment %s (rc: %d)\n",
			   name, rc);
		goto out_free;
	}

	if (do_nonshared)
		dcss_command = DCSS_LOADNSR;
	else

@@ -372,7 +316,7 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
		rc = dcss_diag_translate_rc (seg->end);
		dcss_diag(DCSS_PURGESEG, seg->dcss_name,
			  &seg->start_addr, &seg->end);
		goto out_free;
		goto out_shared;
	}
	seg->do_nonshared = do_nonshared;
	atomic_set(&seg->ref_count, 1);

@@ -391,6 +335,8 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
			(void*)seg->start_addr, (void*)seg->end,
			segtype_string[seg->vm_segtype]);
	goto out;
 out_shared:
	remove_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1);
 out_free:
	kfree(seg);
 out:

@@ -530,12 +476,12 @@ segment_unload(char *name)
			"please report to linux390@de.ibm.com\n",name);
		goto out_unlock;
	}
	if (atomic_dec_return(&seg->ref_count) == 0) {
		list_del(&seg->list);
		dcss_diag(DCSS_PURGESEG, seg->dcss_name,
			  &dummy, &dummy);
		kfree(seg);
	}
	if (atomic_dec_return(&seg->ref_count) != 0)
		goto out_unlock;
	remove_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1);
	list_del(&seg->list);
	dcss_diag(DCSS_PURGESEG, seg->dcss_name, &dummy, &dummy);
	kfree(seg);
 out_unlock:
	mutex_unlock(&dcss_lock);
}

@@ -24,6 +24,7 @@
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/poison.h>

#include <asm/processor.h>
#include <asm/system.h>

@@ -69,6 +70,8 @@ void show_mem(void)
	printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
	i = max_mapnr;
	while (i-- > 0) {
		if (!pfn_valid(i))
			continue;
		page = pfn_to_page(i);
		total++;
		if (PageReserved(page))

@@ -84,150 +87,52 @@ void show_mem(void)
	printk("%d pages swap cached\n",cached);
}

extern unsigned long __initdata zholes_size[];
static void __init setup_ro_region(void)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	pte_t new_pte;
	unsigned long address, end;

	address = ((unsigned long)&__start_rodata) & PAGE_MASK;
	end = PFN_ALIGN((unsigned long)&__end_rodata);

	for (; address < end; address += PAGE_SIZE) {
		pgd = pgd_offset_k(address);
		pmd = pmd_offset(pgd, address);
		pte = pte_offset_kernel(pmd, address);
		new_pte = mk_pte_phys(address, __pgprot(_PAGE_RO));
		set_pte(pte, new_pte);
	}
}

extern void vmem_map_init(void);

/*
 * paging_init() sets up the page tables
 */

#ifndef CONFIG_64BIT
void __init paging_init(void)
{
	pgd_t * pg_dir;
	pte_t * pg_table;
	pte_t pte;
	int i;
	unsigned long tmp;
	unsigned long pfn = 0;
	unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE;
	static const int ssm_mask = 0x04000000L;
	unsigned long ro_start_pfn, ro_end_pfn;
	unsigned long zones_size[MAX_NR_ZONES];

	ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata);
	ro_end_pfn = PFN_UP((unsigned long)&__end_rodata);

	memset(zones_size, 0, sizeof(zones_size));
	zones_size[ZONE_DMA] = max_low_pfn;
	free_area_init_node(0, &contig_page_data, zones_size,
			    __pa(PAGE_OFFSET) >> PAGE_SHIFT,
			    zholes_size);

	/* unmap whole virtual address space */

	pg_dir = swapper_pg_dir;

	for (i = 0; i < PTRS_PER_PGD; i++)
		pmd_clear((pmd_t *) pg_dir++);

	/*
	 * map whole physical memory to virtual memory (identity mapping)
	 */

	pg_dir = swapper_pg_dir;

	while (pfn < max_low_pfn) {
		/*
		 * pg_table is physical at this point
		 */
		pg_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);

		pmd_populate_kernel(&init_mm, (pmd_t *) pg_dir, pg_table);
		pg_dir++;

		for (tmp = 0 ; tmp < PTRS_PER_PTE ; tmp++,pg_table++) {
			if (pfn >= ro_start_pfn && pfn < ro_end_pfn)
				pte = pfn_pte(pfn, __pgprot(_PAGE_RO));
			else
				pte = pfn_pte(pfn, PAGE_KERNEL);
			if (pfn >= max_low_pfn)
				pte_val(pte) = _PAGE_TYPE_EMPTY;
			set_pte(pg_table, pte);
			pfn++;
		}
	}

	S390_lowcore.kernel_asce = pgdir_k;

	/* enable virtual mapping in kernel mode */
	__ctl_load(pgdir_k, 1, 1);
	__ctl_load(pgdir_k, 7, 7);
	__ctl_load(pgdir_k, 13, 13);
	__raw_local_irq_ssm(ssm_mask);

	local_flush_tlb();
}

#else /* CONFIG_64BIT */

void __init paging_init(void)
{
	pgd_t * pg_dir;
	pmd_t * pm_dir;
	pte_t * pt_dir;
	pte_t pte;
	int i,j,k;
	unsigned long pfn = 0;
	unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) |
		_KERN_REGION_TABLE;
	pgd_t *pg_dir;
	int i;
	unsigned long pgdir_k;
	static const int ssm_mask = 0x04000000L;
	unsigned long zones_size[MAX_NR_ZONES];
	unsigned long dma_pfn, high_pfn;
	unsigned long ro_start_pfn, ro_end_pfn;
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(zones_size, 0, sizeof(zones_size));
	dma_pfn = MAX_DMA_ADDRESS >> PAGE_SHIFT;
	high_pfn = max_low_pfn;
	ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata);
	ro_end_pfn = PFN_UP((unsigned long)&__end_rodata);
	pg_dir = swapper_pg_dir;

	if (dma_pfn > high_pfn)
		zones_size[ZONE_DMA] = high_pfn;
	else {
		zones_size[ZONE_DMA] = dma_pfn;
		zones_size[ZONE_NORMAL] = high_pfn - dma_pfn;
	}

	/* Initialize mem_map[]. */
	free_area_init_node(0, &contig_page_data, zones_size,
			    __pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size);

	/*
	 * map whole physical memory to virtual memory (identity mapping)
	 */

	pg_dir = swapper_pg_dir;

	for (i = 0 ; i < PTRS_PER_PGD ; i++,pg_dir++) {

		if (pfn >= max_low_pfn) {
			pgd_clear(pg_dir);
			continue;
		}

		pm_dir = (pmd_t *) alloc_bootmem_pages(PAGE_SIZE * 4);
		pgd_populate(&init_mm, pg_dir, pm_dir);

		for (j = 0 ; j < PTRS_PER_PMD ; j++,pm_dir++) {
			if (pfn >= max_low_pfn) {
				pmd_clear(pm_dir);
				continue;
			}

			pt_dir = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
			pmd_populate_kernel(&init_mm, pm_dir, pt_dir);

			for (k = 0 ; k < PTRS_PER_PTE ; k++,pt_dir++) {
				if (pfn >= ro_start_pfn && pfn < ro_end_pfn)
					pte = pfn_pte(pfn, __pgprot(_PAGE_RO));
				else
					pte = pfn_pte(pfn, PAGE_KERNEL);
				if (pfn >= max_low_pfn)
					pte_val(pte) = _PAGE_TYPE_EMPTY;
				set_pte(pt_dir, pte);
				pfn++;
			}
		}
	}
#ifdef CONFIG_64BIT
	pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERN_REGION_TABLE;
	for (i = 0; i < PTRS_PER_PGD; i++)
		pgd_clear(pg_dir + i);
#else
	pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE;
	for (i = 0; i < PTRS_PER_PGD; i++)
		pmd_clear((pmd_t *)(pg_dir + i));
#endif
	vmem_map_init();
	setup_ro_region();

	S390_lowcore.kernel_asce = pgdir_k;

@@ -237,9 +142,11 @@ void __init paging_init(void)
	__ctl_load(pgdir_k, 13, 13);
	__raw_local_irq_ssm(ssm_mask);

	local_flush_tlb();
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init_nodes(max_zone_pfns);
}
#endif /* CONFIG_64BIT */

void __init mem_init(void)
{

@@ -269,6 +176,8 @@ void __init mem_init(void)
	printk("Write protected kernel read-only data: %#lx - %#lx\n",
	       (unsigned long)&__start_rodata,
	       PFN_ALIGN((unsigned long)&__end_rodata) - 1);
	printk("Virtual memmap size: %ldk\n",
	       (max_pfn * sizeof(struct page)) >> 10);
}

void free_initmem(void)

@@ -279,6 +188,7 @@ void free_initmem(void)
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}

@@ -0,0 +1,381 @@
/*
 *  arch/s390/mm/vmem.c
 *
 *    Copyright IBM Corp. 2006
 *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/list.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>

unsigned long vmalloc_end;
EXPORT_SYMBOL(vmalloc_end);

static struct page *vmem_map;
static DEFINE_MUTEX(vmem_mutex);

struct memory_segment {
	struct list_head list;
	unsigned long start;
	unsigned long size;
};

static LIST_HEAD(mem_segs);

void memmap_init(unsigned long size, int nid, unsigned long zone,
		 unsigned long start_pfn)
{
	struct page *start, *end;
	struct page *map_start, *map_end;
	int i;

	start = pfn_to_page(start_pfn);
	end = start + size;

	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
		unsigned long cstart, cend;

		cstart = PFN_DOWN(memory_chunk[i].addr);
		cend = cstart + PFN_DOWN(memory_chunk[i].size);

		map_start = mem_map + cstart;
		map_end = mem_map + cend;

		if (map_start < start)
			map_start = start;
		if (map_end > end)
			map_end = end;

		map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1))
			/ sizeof(struct page);
		map_end += ((PFN_ALIGN((unsigned long) map_end)
			     - (unsigned long) map_end)
			    / sizeof(struct page));

		if (map_start < map_end)
			memmap_init_zone((unsigned long)(map_end - map_start),
					 nid, zone, page_to_pfn(map_start));
	}
}

static inline void *vmem_alloc_pages(unsigned int order)
{
	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
}

static inline pmd_t *vmem_pmd_alloc(void)
{
	pmd_t *pmd;
	int i;

	pmd = vmem_alloc_pages(PMD_ALLOC_ORDER);
	if (!pmd)
		return NULL;
	for (i = 0; i < PTRS_PER_PMD; i++)
		pmd_clear(pmd + i);
	return pmd;
}

static inline pte_t *vmem_pte_alloc(void)
{
	pte_t *pte;
	pte_t empty_pte;
	int i;

	pte = vmem_alloc_pages(PTE_ALLOC_ORDER);
	if (!pte)
		return NULL;
	pte_val(empty_pte) = _PAGE_TYPE_EMPTY;
	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(pte + i, empty_pte);
	return pte;
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_range(unsigned long start, unsigned long size)
{
	unsigned long address;
	pgd_t *pg_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t pte;
	int ret = -ENOMEM;

	for (address = start; address < start + size; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, pm_dir);
		}

		pm_dir = pmd_offset(pg_dir, address);
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		pte = pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL);
		set_pte(pt_dir, pte);
	}
	ret = 0;
out:
	flush_tlb_kernel_range(start, start + size);
	return ret;
}

/*
 * Remove a physical memory range from the 1:1 mapping.
 * Currently only invalidates page table entries.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	unsigned long address;
	pgd_t *pg_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t pte;

	pte_val(pte) = _PAGE_TYPE_EMPTY;
	for (address = start; address < start + size; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir))
			continue;
		pm_dir = pmd_offset(pg_dir, address);
		if (pmd_none(*pm_dir))
			continue;
		pt_dir = pte_offset_kernel(pm_dir, address);
		set_pte(pt_dir, pte);
	}
	flush_tlb_kernel_range(start, start + size);
}

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
static int vmem_add_mem_map(unsigned long start, unsigned long size)
{
	unsigned long address, start_addr, end_addr;
	struct page *map_start, *map_end;
	pgd_t *pg_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t pte;
	int ret = -ENOMEM;

	map_start = vmem_map + PFN_DOWN(start);
	map_end = vmem_map + PFN_DOWN(start + size);

	start_addr = (unsigned long) map_start & PAGE_MASK;
	end_addr = PFN_ALIGN((unsigned long) map_end);

	for (address = start_addr; address < end_addr; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, pm_dir);
		}

		pm_dir = pmd_offset(pg_dir, address);
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			unsigned long new_page;

			new_page =__pa(vmem_alloc_pages(0));
			if (!new_page)
				goto out;
			pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
			set_pte(pt_dir, pte);
		}
	}
	ret = 0;
out:
	flush_tlb_kernel_range(start_addr, end_addr);
	return ret;
}

static int vmem_add_mem(unsigned long start, unsigned long size)
{
	int ret;

	ret = vmem_add_range(start, size);
	if (ret)
		return ret;
	return vmem_add_mem_map(start, size);
}

/*
 * Add memory segment to the segment list if it doesn't overlap with
 * an already present segment.
 */
static int insert_memory_segment(struct memory_segment *seg)
{
	struct memory_segment *tmp;

	if (PFN_DOWN(seg->start + seg->size) > max_pfn ||
	    seg->start + seg->size < seg->start)
		return -ERANGE;

	list_for_each_entry(tmp, &mem_segs, list) {
		if (seg->start >= tmp->start + tmp->size)
			continue;
		if (seg->start + seg->size <= tmp->start)
			continue;
		return -ENOSPC;
	}
	list_add(&seg->list, &mem_segs);
	return 0;
}

/*
 * Remove memory segment from the segment list.
 */
static void remove_memory_segment(struct memory_segment *seg)
{
	list_del(&seg->list);
}

static void __remove_shared_memory(struct memory_segment *seg)
{
	remove_memory_segment(seg);
	vmem_remove_range(seg->start, seg->size);
}

int remove_shared_memory(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);

	ret = -ENOENT;
	list_for_each_entry(seg, &mem_segs, list) {
		if (seg->start == start && seg->size == size)
			break;
	}

	if (seg->start != start || seg->size != size)
		goto out;

	ret = 0;
	__remove_shared_memory(seg);
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

int add_shared_memory(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	struct page *page;
	unsigned long pfn, num_pfn, end_pfn;
	int ret;

	mutex_lock(&vmem_mutex);
	ret = -ENOMEM;
	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		goto out;
	seg->start = start;
	seg->size = size;

	ret = insert_memory_segment(seg);
	if (ret)
		goto out_free;

	ret = vmem_add_mem(start, size);
	if (ret)
		goto out_remove;

	pfn = PFN_DOWN(start);
	num_pfn = PFN_DOWN(size);
	end_pfn = pfn + num_pfn;

	page = pfn_to_page(pfn);
	memset(page, 0, num_pfn * sizeof(struct page));

	for (; pfn < end_pfn; pfn++) {
		page = pfn_to_page(pfn);
		init_page_count(page);
		reset_page_mapcount(page);
		SetPageReserved(page);
		INIT_LIST_HEAD(&page->lru);
	}
	goto out;

out_remove:
	__remove_shared_memory(seg);
out_free:
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

/*
 * map whole physical memory to virtual memory (identity mapping)
 */
void __init vmem_map_init(void)
{
	unsigned long map_size;
	int i;

	map_size = ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) * sizeof(struct page);
	vmalloc_end = PFN_ALIGN(VMALLOC_END_INIT) - PFN_ALIGN(map_size);
	vmem_map = (struct page *) vmalloc_end;
	NODE_DATA(0)->node_mem_map = vmem_map;

	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++)
		vmem_add_mem(memory_chunk[i].addr, memory_chunk[i].size);
}

/*
 * Convert memory chunk array to a memory segment list so there is a single
 * list that contains both r/w memory and shared memory segments.
 */
static int __init vmem_convert_memory_chunk(void)
{
	struct memory_segment *seg;
	int i;

	mutex_lock(&vmem_mutex);
	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
		if (!memory_chunk[i].size)
			continue;
		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
		if (!seg)
			panic("Out of memory...\n");
		seg->start = memory_chunk[i].addr;
		seg->size = memory_chunk[i].size;
		insert_memory_segment(seg);
	}
	mutex_unlock(&vmem_mutex);
	return 0;
}

core_initcall(vmem_convert_memory_chunk);

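With vmem.c in place the mem_map array itself lives at a fixed virtual address (vmem_map) and only the slices that cover real RAM get backing pages, so pfn/page conversion degenerates to pointer arithmetic. Illustration only — vmem_map is static in the file above, and the real pfn_to_page/page_to_pfn come from the memory-model macros in the headers:

/* Hypothetical helpers showing why a virtual memmap keeps the
 * conversion O(1) even when physical memory has large holes:
 * slices of the array over holes are simply never touched or backed. */
static inline struct page *pfn_to_page_sketch(struct page *vmem_map_base,
					      unsigned long pfn)
{
	return vmem_map_base + pfn;
}

static inline unsigned long page_to_pfn_sketch(struct page *vmem_map_base,
					       struct page *page)
{
	return (unsigned long)(page - vmem_map_base);
}
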
@@ -1050,10 +1050,10 @@ dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
		}
	} else {		/* error */
		memcpy(&cqr->irb, irb, sizeof (struct irb));
#ifdef ERP_DEBUG
		/* dump sense data */
		dasd_log_sense(cqr, irb);
#endif
		if (device->features & DASD_FEATURE_ERPLOG) {
			/* dump sense data */
			dasd_log_sense(cqr, irb);
		}
		switch (era) {
		case dasd_era_fatal:
			cqr->status = DASD_CQR_FAILED;

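This dasd.c hunk, and the dasd_3990_erp.c hunks that follow, replace the compile-time ERP_DEBUG switch with the per-device DASD_FEATURE_ERPLOG flag, so ERP logging can be flipped at run time through sysfs instead of requiring a rebuilt driver. The shape of the conversion, with made-up names:

/* Before: code compiled in only under #ifdef ERP_DEBUG.
 * After: always compiled, gated by a feature bit that the erplog
 * sysfs attribute (added later in this series) toggles at run time. */
#define FEATURE_ERPLOG_SKETCH 0x80

struct dasd_dev_sketch {
	unsigned int features;
};

static void maybe_log_sense(struct dasd_dev_sketch *dev)
{
	if (dev->features & FEATURE_ERPLOG_SKETCH) {
		/* dump sense data / print the ERP chain here */
	}
}
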
@@ -2641,14 +2641,12 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
	struct dasd_ccw_req *erp = NULL;
	struct dasd_device *device = cqr->device;
	__u32 cpa = cqr->irb.scsw.cpa;
	struct dasd_ccw_req *temp_erp = NULL;

#ifdef ERP_DEBUG
	/* print current erp_chain */
	DEV_MESSAGE(KERN_ERR, device, "%s",
		    "ERP chain at BEGINNING of ERP-ACTION");
	{
		struct dasd_ccw_req *temp_erp = NULL;

	if (device->features & DASD_FEATURE_ERPLOG) {
		/* print current erp_chain */
		DEV_MESSAGE(KERN_ERR, device, "%s",
			    "ERP chain at BEGINNING of ERP-ACTION");
		for (temp_erp = cqr;
		     temp_erp != NULL; temp_erp = temp_erp->refers) {

@@ -2658,7 +2656,6 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
				    temp_erp->refers);
		}
	}
#endif /* ERP_DEBUG */

	/* double-check if current erp/cqr was successfull */
	if ((cqr->irb.scsw.cstat == 0x00) &&

@@ -2695,11 +2692,10 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
		erp = dasd_3990_erp_handle_match_erp(cqr, erp);
	}

#ifdef ERP_DEBUG
	/* print current erp_chain */
	DEV_MESSAGE(KERN_ERR, device, "%s", "ERP chain at END of ERP-ACTION");
	{
		struct dasd_ccw_req *temp_erp = NULL;
	if (device->features & DASD_FEATURE_ERPLOG) {
		/* print current erp_chain */
		DEV_MESSAGE(KERN_ERR, device, "%s",
			    "ERP chain at END of ERP-ACTION");
		for (temp_erp = erp;
		     temp_erp != NULL; temp_erp = temp_erp->refers) {

@@ -2709,7 +2705,6 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
				    temp_erp->refers);
		}
	}
#endif /* ERP_DEBUG */

	if (erp->status == DASD_CQR_FAILED)
		dasd_log_ccw(erp, 1, cpa);

@@ -202,6 +202,8 @@ dasd_feature_list(char *str, char **endp)
			features |= DASD_FEATURE_READONLY;
		else if (len == 4 && !strncmp(str, "diag", 4))
			features |= DASD_FEATURE_USEDIAG;
		else if (len == 6 && !strncmp(str, "erplog", 6))
			features |= DASD_FEATURE_ERPLOG;
		else {
			MESSAGE(KERN_WARNING,
				"unsupported feature: %*s, "

@@ -709,6 +711,52 @@ dasd_ro_store(struct device *dev, struct device_attribute *attr,
}

static DEVICE_ATTR(readonly, 0644, dasd_ro_show, dasd_ro_store);
/*
 * erplog controls the logging of ERP related data
 * (e.g. failing channel programs).
 */
static ssize_t
dasd_erplog_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dasd_devmap *devmap;
	int erplog;

	devmap = dasd_find_busid(dev->bus_id);
	if (!IS_ERR(devmap))
		erplog = (devmap->features & DASD_FEATURE_ERPLOG) != 0;
	else
		erplog = (DASD_FEATURE_DEFAULT & DASD_FEATURE_ERPLOG) != 0;
	return snprintf(buf, PAGE_SIZE, erplog ? "1\n" : "0\n");
}

static ssize_t
dasd_erplog_store(struct device *dev, struct device_attribute *attr,
		  const char *buf, size_t count)
{
	struct dasd_devmap *devmap;
	int val;
	char *endp;

	devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
	if (IS_ERR(devmap))
		return PTR_ERR(devmap);

	val = simple_strtoul(buf, &endp, 0);
	if (((endp + 1) < (buf + count)) || (val > 1))
		return -EINVAL;

	spin_lock(&dasd_devmap_lock);
	if (val)
		devmap->features |= DASD_FEATURE_ERPLOG;
	else
		devmap->features &= ~DASD_FEATURE_ERPLOG;
	if (devmap->device)
		devmap->device->features = devmap->features;
	spin_unlock(&dasd_devmap_lock);
	return count;
}

static DEVICE_ATTR(erplog, 0644, dasd_erplog_show, dasd_erplog_store);

/*
 * use_diag controls whether the driver should use diag rather than ssch

@@ -896,6 +944,7 @@ static struct attribute * dasd_attrs[] = {
	&dev_attr_uid.attr,
	&dev_attr_use_diag.attr,
	&dev_attr_eer_enabled.attr,
	&dev_attr_erplog.attr,
	NULL,
};

@@ -13,10 +13,6 @@

#ifdef __KERNEL__

/* erp debugging in dasd.c and dasd_3990_erp.c */
#define ERP_DEBUG


/* we keep old device allocation scheme; IOW, minors are still in 0..255 */
#define DASD_PER_MAJOR (1U << (MINORBITS - DASD_PARTN_BITS))
#define DASD_PARTN_MASK ((1 << DASD_PARTN_BITS) - 1)

@@ -16,14 +16,15 @@

#ifdef CONFIG_MAGIC_SYSRQ
static int ctrlchar_sysrq_key;
static struct tty_struct *sysrq_tty;

static void
ctrlchar_handle_sysrq(void *tty)
ctrlchar_handle_sysrq(struct work_struct *work)
{
	handle_sysrq(ctrlchar_sysrq_key, (struct tty_struct *) tty);
	handle_sysrq(ctrlchar_sysrq_key, sysrq_tty);
}

static DECLARE_WORK(ctrlchar_work, ctrlchar_handle_sysrq, NULL);
static DECLARE_WORK(ctrlchar_work, ctrlchar_handle_sysrq);
#endif

@@ -53,7 +54,7 @@ ctrlchar_handle(const unsigned char *buf, int len, struct tty_struct *tty)
		/* racy */
		if (len == 3 && buf[1] == '-') {
			ctrlchar_sysrq_key = buf[2];
			ctrlchar_work.data = tty;
			sysrq_tty = tty;
			schedule_work(&ctrlchar_work);
			return CTRLCHAR_SYSRQ;
		}

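The ctrlchar.c hunks, like the tape driver hunks below, track the 2.6.20 workqueue rework: a work function now receives the struct work_struct pointer itself rather than an opaque void *, so per-work data must either live in a static (as sysrq_tty does here) or in a structure that embeds the work item. A minimal sketch of the embed-plus-container_of pattern the tape conversions use (names invented for illustration):

#include <linux/workqueue.h>
#include <linux/slab.h>

struct my_job {
	int op;				/* payload rides inside the wrapper */
	struct work_struct work;	/* embedded work item */
};

static void my_handler(struct work_struct *work)
{
	/* Recover the wrapper from the embedded member. */
	struct my_job *job = container_of(work, struct my_job, work);

	/* ... act on job->op ... */
	kfree(job);
}

static int queue_my_job(int op)
{
	struct my_job *job = kmalloc(sizeof(*job), GFP_ATOMIC);

	if (!job)
		return -ENOMEM;
	job->op = op;
	INIT_WORK(&job->work, my_handler);	/* no data argument anymore */
	schedule_work(&job->work);
	return 0;
}
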
@@ -179,6 +179,7 @@ struct tape_char_data {
/* Block Frontend Data */
struct tape_blk_data
{
	struct tape_device *	device;
	/* Block device request queue. */
	request_queue_t *	request_queue;
	spinlock_t		request_queue_lock;

@@ -240,7 +241,7 @@ struct tape_device {
#endif

	/* Function to start or stop the next request later. */
	struct work_struct		tape_dnr;
	struct delayed_work		tape_dnr;
};

/* Externals from tape_core.c */

@@ -95,6 +95,12 @@ tape_34xx_medium_sense(struct tape_device *device)
	return rc;
}

struct tape_34xx_work {
	struct tape_device	*device;
	enum tape_op		op;
	struct work_struct	work;
};

/*
 * These functions are currently used only to schedule a medium_sense for
 * later execution. This is because we get an interrupt whenever a medium

@@ -103,13 +109,10 @@ tape_34xx_medium_sense(struct tape_device *device)
 * interrupt handler.
 */
static void
tape_34xx_work_handler(void *data)
tape_34xx_work_handler(struct work_struct *work)
{
	struct {
		struct tape_device	*device;
		enum tape_op		op;
		struct work_struct	work;
	} *p = data;
	struct tape_34xx_work *p =
		container_of(work, struct tape_34xx_work, work);

	switch(p->op) {
		case TO_MSEN:

@@ -126,17 +129,13 @@ tape_34xx_work_handler(void *data)
static int
tape_34xx_schedule_work(struct tape_device *device, enum tape_op op)
{
	struct {
		struct tape_device	*device;
		enum tape_op		op;
		struct work_struct	work;
	} *p;
	struct tape_34xx_work *p;

	if ((p = kmalloc(sizeof(*p), GFP_ATOMIC)) == NULL)
		return -ENOMEM;

	memset(p, 0, sizeof(*p));
	INIT_WORK(&p->work, tape_34xx_work_handler, p);
	INIT_WORK(&p->work, tape_34xx_work_handler);

	p->device = tape_get_device_reference(device);
	p->op = op;

@@ -236,9 +236,10 @@ struct work_handler_data {
};

static void
tape_3590_work_handler(void *data)
tape_3590_work_handler(struct work_struct *work)
{
	struct work_handler_data *p = data;
	struct work_handler_data *p =
		container_of(work, struct work_handler_data, work);

	switch (p->op) {
	case TO_MSEN:

@@ -263,7 +264,7 @@ tape_3590_schedule_work(struct tape_device *device, enum tape_op op)
	if ((p = kzalloc(sizeof(*p), GFP_ATOMIC)) == NULL)
		return -ENOMEM;

	INIT_WORK(&p->work, tape_3590_work_handler, p);
	INIT_WORK(&p->work, tape_3590_work_handler);

	p->device = tape_get_device_reference(device);
	p->op = op;

@@ -15,6 +15,7 @@
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/buffer_head.h>
#include <linux/kernel.h>

#include <asm/debug.h>

@@ -143,7 +144,8 @@ tapeblock_start_request(struct tape_device *device, struct request *req)
 * queue.
 */
static void
tapeblock_requeue(void *data) {
tapeblock_requeue(struct work_struct *work) {
	struct tape_blk_data *	blkdat;
	struct tape_device *	device;
	request_queue_t *	queue;
	int			nr_queued;

@@ -151,7 +153,8 @@ tapeblock_requeue(void *data) {
	struct list_head *	l;
	int			rc;

	device = (struct tape_device *) data;
	blkdat = container_of(work, struct tape_blk_data, requeue_task);
	device = blkdat->device;
	if (!device)
		return;

@@ -212,6 +215,7 @@ tapeblock_setup_device(struct tape_device * device)
	int			rc;

	blkdat = &device->blk_data;
	blkdat->device = device;
	spin_lock_init(&blkdat->request_queue_lock);
	atomic_set(&blkdat->requeue_scheduled, 0);

@@ -255,8 +259,8 @@ tapeblock_setup_device(struct tape_device * device)

	add_disk(disk);

	INIT_WORK(&blkdat->requeue_task, tapeblock_requeue,
		tape_get_device_reference(device));
	tape_get_device_reference(device);
	INIT_WORK(&blkdat->requeue_task, tapeblock_requeue);

	return 0;

@@ -271,7 +275,7 @@ void
tapeblock_cleanup_device(struct tape_device *device)
{
	flush_scheduled_work();
	device->blk_data.requeue_task.data = tape_put_device(device);
	tape_put_device(device);

	if (!device->blk_data.disk) {
		PRINT_ERR("(%s): No gendisk to clean up!\n",

@@ -28,7 +28,7 @@
#define PRINTK_HEADER "TAPE_CORE: "

static void __tape_do_irq (struct ccw_device *, unsigned long, struct irb *);
static void tape_delayed_next_request(void * data);
static void tape_delayed_next_request(struct work_struct *);

/*
 * One list to contain all tape devices of all disciplines, so

@@ -272,7 +272,7 @@ __tape_cancel_io(struct tape_device *device, struct tape_request *request)
		return 0;
	case -EBUSY:
		request->status = TAPE_REQUEST_CANCEL;
		schedule_work(&device->tape_dnr);
		schedule_delayed_work(&device->tape_dnr, 0);
		return 0;
	case -ENODEV:
		DBF_EXCEPTION(2, "device gone, retry\n");

@@ -470,7 +470,7 @@ tape_alloc_device(void)
	*device->modeset_byte = 0;
	device->first_minor = -1;
	atomic_set(&device->ref_count, 1);
	INIT_WORK(&device->tape_dnr, tape_delayed_next_request, device);
	INIT_DELAYED_WORK(&device->tape_dnr, tape_delayed_next_request);

	return device;
}

@@ -724,7 +724,7 @@ __tape_start_io(struct tape_device *device, struct tape_request *request)
	} else if (rc == -EBUSY) {
		/* The common I/O subsystem is currently busy. Retry later. */
		request->status = TAPE_REQUEST_QUEUED;
		schedule_work(&device->tape_dnr);
		schedule_delayed_work(&device->tape_dnr, 0);
		rc = 0;
	} else {
		/* Start failed. Remove request and indicate failure. */

@@ -790,11 +790,11 @@ __tape_start_next_request(struct tape_device *device)
}

static void
tape_delayed_next_request(void *data)
tape_delayed_next_request(struct work_struct *work)
{
	struct tape_device *	device;
	struct tape_device *device =
		container_of(work, struct tape_device, tape_dnr.work);

	device = (struct tape_device *) data;
	DBF_LH(6, "tape_delayed_next_request(%p)\n", device);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	__tape_start_next_request(device);

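tape_dnr changes type from work_struct to delayed_work because the new API routes schedule_delayed_work() through a dedicated wrapper; the handler then container_ofs through the wrapper's .work member. A sketch under the same 2.6.20 API, with invented names:

#include <linux/workqueue.h>

struct my_dev_sketch {
	struct delayed_work dnr;	/* was: struct work_struct */
};

static void dnr_handler(struct work_struct *work)
{
	/* Note the extra .work hop for delayed work items. */
	struct my_dev_sketch *dev =
		container_of(work, struct my_dev_sketch, dnr.work);

	/* ... start the next queued request on dev ... */
	(void)dev;
}

static void dnr_setup(struct my_dev_sketch *dev)
{
	INIT_DELAYED_WORK(&dev->dnr, dnr_handler);
	schedule_delayed_work(&dev->dnr, 0);	/* delay 0 = as soon as possible */
}
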
@@ -183,7 +183,7 @@ css_get_ssd_info(struct subchannel *sch)
	page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!page)
		return -ENOMEM;
	spin_lock_irq(&sch->lock);
	spin_lock_irq(sch->lock);
	ret = chsc_get_sch_desc_irq(sch, page);
	if (ret) {
		static int cio_chsc_err_msg;

@@ -197,7 +197,7 @@ css_get_ssd_info(struct subchannel *sch)
			cio_chsc_err_msg = 1;
		}
	}
	spin_unlock_irq(&sch->lock);
	spin_unlock_irq(sch->lock);
	free_page((unsigned long)page);
	if (!ret) {
		int j, chpid, mask;

@@ -233,7 +233,7 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
	if (j >= 8)
		return 0;

	spin_lock_irq(&sch->lock);
	spin_lock_irq(sch->lock);

	stsch(sch->schid, &schib);
	if (!schib.pmcw.dnv)

@@ -265,10 +265,10 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
	else if (sch->lpm == mask)
		goto out_unreg;
out_unlock:
	spin_unlock_irq(&sch->lock);
	spin_unlock_irq(sch->lock);
	return 0;
out_unreg:
	spin_unlock_irq(&sch->lock);
	spin_unlock_irq(sch->lock);
	sch->lpm = 0;
	if (css_enqueue_subchannel_slow(sch->schid)) {
		css_clear_subchannel_slow_list();

@@ -378,12 +378,12 @@ __s390_process_res_acc(struct subchannel_id schid, void *data)
		/* Check if a subchannel is newly available. */
		return s390_process_res_acc_new_sch(schid);

	spin_lock_irq(&sch->lock);
	spin_lock_irq(sch->lock);

	chp_mask = s390_process_res_acc_sch(res_data, sch);

	if (chp_mask == 0) {
		spin_unlock_irq(&sch->lock);
		spin_unlock_irq(sch->lock);
		put_device(&sch->dev);
		return 0;
	}

@@ -397,7 +397,7 @@ __s390_process_res_acc(struct subchannel_id schid, void *data)
	else if (sch->driver && sch->driver->verify)
		sch->driver->verify(&sch->dev);

	spin_unlock_irq(&sch->lock);
	spin_unlock_irq(sch->lock);
	put_device(&sch->dev);
	return 0;
}

@@ -635,21 +635,21 @@ __chp_add(struct subchannel_id schid, void *data)
	if (!sch)
		/* Check if the subchannel is now available. */
		return __chp_add_new_sch(schid);
	spin_lock_irq(&sch->lock);
	spin_lock_irq(sch->lock);
	for (i=0; i<8; i++) {
		mask = 0x80 >> i;
		if ((sch->schib.pmcw.pim & mask) &&
		    (sch->schib.pmcw.chpid[i] == chp->id)) {
			if (stsch(sch->schid, &sch->schib) != 0) {
				/* Endgame. */
				spin_unlock_irq(&sch->lock);
				spin_unlock_irq(sch->lock);
				return -ENXIO;
			}
			break;
		}
	}
	if (i==8) {
		spin_unlock_irq(&sch->lock);
		spin_unlock_irq(sch->lock);
		return 0;
	}
	sch->lpm = ((sch->schib.pmcw.pim &

@@ -660,7 +660,7 @@ __chp_add(struct subchannel_id schid, void *data)
	if (sch->driver && sch->driver->verify)
		sch->driver->verify(&sch->dev);

	spin_unlock_irq(&sch->lock);
	spin_unlock_irq(sch->lock);
	put_device(&sch->dev);
	return 0;
}

@@ -750,7 +750,7 @@ __s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
	if (!sch->ssd_info.valid)
		return;

	spin_lock_irqsave(&sch->lock, flags);
	spin_lock_irqsave(sch->lock, flags);
	old_lpm = sch->lpm;
	for (chp = 0; chp < 8; chp++) {
		if (sch->ssd_info.chpid[chp] != chpid)

@@ -785,7 +785,7 @@ __s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
			sch->driver->verify(&sch->dev);
		break;
	}
	spin_unlock_irqrestore(&sch->lock, flags);
	spin_unlock_irqrestore(sch->lock, flags);
}

static int

@@ -143,11 +143,11 @@ cio_tpi(void)
		return 1;
	local_bh_disable();
	irq_enter ();
	spin_lock(&sch->lock);
	spin_lock(sch->lock);
	memcpy (&sch->schib.scsw, &irb->scsw, sizeof (struct scsw));
	if (sch->driver && sch->driver->irq)
		sch->driver->irq(&sch->dev);
	spin_unlock(&sch->lock);
	spin_unlock(sch->lock);
	irq_exit ();
	_local_bh_enable();
	return 1;

@@ -415,6 +415,8 @@ cio_enable_subchannel (struct subchannel *sch, unsigned int isc)
	CIO_TRACE_EVENT (2, "ensch");
	CIO_TRACE_EVENT (2, sch->dev.bus_id);

	if (sch_is_pseudo_sch(sch))
		return -EINVAL;
	ccode = stsch (sch->schid, &sch->schib);
	if (ccode)
		return -ENODEV;

@@ -462,6 +464,8 @@ cio_disable_subchannel (struct subchannel *sch)
	CIO_TRACE_EVENT (2, "dissch");
	CIO_TRACE_EVENT (2, sch->dev.bus_id);

	if (sch_is_pseudo_sch(sch))
		return 0;
	ccode = stsch (sch->schid, &sch->schib);
	if (ccode == 3)		/* Not operational. */
		return -ENODEV;

@@ -496,6 +500,15 @@ cio_disable_subchannel (struct subchannel *sch)
	return ret;
}

int cio_create_sch_lock(struct subchannel *sch)
{
	sch->lock = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
	if (!sch->lock)
		return -ENOMEM;
	spin_lock_init(sch->lock);
	return 0;
}

/*
 * cio_validate_subchannel()
 *

@@ -513,6 +526,7 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
{
	char dbf_txt[15];
	int ccode;
	int err;

	sprintf (dbf_txt, "valsch%x", schid.sch_no);
	CIO_TRACE_EVENT (4, dbf_txt);

@@ -520,9 +534,15 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
	/* Nuke all fields. */
	memset(sch, 0, sizeof(struct subchannel));

	spin_lock_init(&sch->lock);
	sch->schid = schid;
	if (cio_is_console(schid)) {
		sch->lock = cio_get_console_lock();
	} else {
		err = cio_create_sch_lock(sch);
		if (err)
			goto out;
	}
	mutex_init(&sch->reg_mutex);

	/* Set a name for the subchannel */
	snprintf (sch->dev.bus_id, BUS_ID_SIZE, "0.%x.%04x", schid.ssid,
		  schid.sch_no);

@@ -534,10 +554,10 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
	 * is not valid.
	 */
	ccode = stsch_err (schid, &sch->schib);
	if (ccode)
		return (ccode == 3) ? -ENXIO : ccode;

	sch->schid = schid;
	if (ccode) {
		err = (ccode == 3) ? -ENXIO : ccode;
		goto out;
	}
	/* Copy subchannel type from path management control word. */
	sch->st = sch->schib.pmcw.st;

@@ -550,14 +570,16 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
			      "non-I/O subchannel type %04X\n",
			      sch->schid.ssid, sch->schid.sch_no, sch->st);
		/* We stop here for non-io subchannels. */
		return sch->st;
		err = sch->st;
		goto out;
	}

	/* Initialization for io subchannels. */
	if (!sch->schib.pmcw.dnv)
	if (!sch->schib.pmcw.dnv) {
		/* io subchannel but device number is invalid. */
		return -ENODEV;

		err = -ENODEV;
		goto out;
	}
	/* Devno is valid. */
	if (is_blacklisted (sch->schid.ssid, sch->schib.pmcw.dev)) {
		/*

@@ -567,7 +589,8 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
		CIO_MSG_EVENT(0, "Blacklisted device detected "
			      "at devno %04X, subchannel set %x\n",
			      sch->schib.pmcw.dev, sch->schid.ssid);
		return -ENODEV;
		err = -ENODEV;
		goto out;
	}
	sch->opm = 0xff;
	if (!cio_is_console(sch->schid))

@@ -595,6 +618,11 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
	if ((sch->lpm & (sch->lpm - 1)) != 0)
		sch->schib.pmcw.mp = 1;	/* multipath mode */
	return 0;
out:
	if (!cio_is_console(schid))
		kfree(sch->lock);
	sch->lock = NULL;
	return err;
}

/*

@@ -637,7 +665,7 @@ do_IRQ (struct pt_regs *regs)
		}
		sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
		if (sch)
			spin_lock(&sch->lock);
			spin_lock(sch->lock);
		/* Store interrupt response block to lowcore. */
		if (tsch (tpi_info->schid, irb) == 0 && sch) {
			/* Keep subchannel information word up to date. */

@@ -648,7 +676,7 @@ do_IRQ (struct pt_regs *regs)
				sch->driver->irq(&sch->dev);
		}
		if (sch)
			spin_unlock(&sch->lock);
			spin_unlock(sch->lock);
		/*
		 * Are more interrupts pending?
		 * If so, the tpi instruction will update the lowcore

@@ -687,10 +715,10 @@ wait_cons_dev (void)
	__ctl_load (cr6, 6, 6);

	do {
		spin_unlock(&console_subchannel.lock);
		spin_unlock(console_subchannel.lock);
		if (!cio_tpi())
			cpu_relax();
		spin_lock(&console_subchannel.lock);
		spin_lock(console_subchannel.lock);
	} while (console_subchannel.schib.scsw.actl != 0);
	/*
	 * restore previous isc value

@@ -87,7 +87,7 @@ struct orb {
/* subchannel data structure used by I/O subroutines */
struct subchannel {
	struct subchannel_id schid;
	spinlock_t lock;	/* subchannel lock */
	spinlock_t *lock;	/* subchannel lock */
	struct mutex reg_mutex;
	enum {
		SUBCHANNEL_TYPE_IO = 0,

@@ -131,15 +131,19 @@ extern int cio_set_options (struct subchannel *, int);
extern int cio_get_options (struct subchannel *);
extern int cio_modify (struct subchannel *);

int cio_create_sch_lock(struct subchannel *);

/* Use with care. */
#ifdef CONFIG_CCW_CONSOLE
extern struct subchannel *cio_probe_console(void);
extern void cio_release_console(void);
extern int cio_is_console(struct subchannel_id);
extern struct subchannel *cio_get_console_subchannel(void);
extern spinlock_t * cio_get_console_lock(void);
#else
#define cio_is_console(schid) 0
#define cio_get_console_subchannel() NULL
#define cio_get_console_lock() NULL;
#endif

extern int cio_show_msg;

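Making subchannel->lock a pointer is what allows a lock to outlive, or be shared across, the structures that use it: ordinary subchannels allocate their own lock (cio_create_sch_lock() above), while the console subchannel hands out its statically allocated one. A sketch of the allocate-or-share split, with abbreviated types:

#include <linux/slab.h>
#include <linux/spinlock.h>

struct sch_sketch {
	spinlock_t *lock;	/* owned (kfree on release) or shared */
};

static DEFINE_SPINLOCK(console_lock_sketch);	/* the shared, static case */

static int sch_lock_init(struct sch_sketch *sch, int is_console)
{
	if (is_console) {
		sch->lock = &console_lock_sketch;	/* share; never kfree */
		return 0;
	}
	sch->lock = kmalloc(sizeof(*sch->lock), GFP_KERNEL);
	if (!sch->lock)
		return -ENOMEM;
	spin_lock_init(sch->lock);
	return 0;
}
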
@ -91,9 +91,9 @@ css_free_subchannel(struct subchannel *sch)
|
|||
/* Reset intparm to zeroes. */
|
||||
sch->schib.pmcw.intparm = 0;
|
||||
cio_modify(sch);
|
||||
kfree(sch->lock);
|
||||
kfree(sch);
|
||||
}
|
||||
}

static void

@@ -102,8 +102,10 @@ css_subchannel_release(struct device *dev)
 	struct subchannel *sch;
 
 	sch = to_subchannel(dev);
-	if (!cio_is_console(sch->schid))
+	if (!cio_is_console(sch->schid)) {
+		kfree(sch->lock);
 		kfree(sch);
+	}
 }
 
 extern int css_get_ssd_info(struct subchannel *sch);
@@ -135,14 +137,16 @@ css_register_subchannel(struct subchannel *sch)
 	sch->dev.parent = &css[0]->device;
 	sch->dev.bus = &css_bus_type;
 	sch->dev.release = &css_subchannel_release;
+	sch->dev.groups = subch_attr_groups;
 
 	/* make it known to the system */
 	ret = css_sch_device_register(sch);
-	if (ret)
+	if (ret) {
 		printk (KERN_WARNING "%s: could not register %s\n",
 			__func__, sch->dev.bus_id);
-	else
-		css_get_ssd_info(sch);
+		return ret;
+	}
+	css_get_ssd_info(sch);
 	return ret;
 }
@@ -201,18 +205,18 @@ static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
 	unsigned long flags;
 	enum { NONE, UNREGISTER, UNREGISTER_PROBE, REPROBE } action;
 
-	spin_lock_irqsave(&sch->lock, flags);
+	spin_lock_irqsave(sch->lock, flags);
 	disc = device_is_disconnected(sch);
 	if (disc && slow) {
 		/* Disconnected devices are evaluated directly only.*/
-		spin_unlock_irqrestore(&sch->lock, flags);
+		spin_unlock_irqrestore(sch->lock, flags);
 		return 0;
 	}
 	/* No interrupt after machine check - kill pending timers. */
 	device_kill_pending_timer(sch);
 	if (!disc && !slow) {
 		/* Non-disconnected devices are evaluated on the slow path. */
-		spin_unlock_irqrestore(&sch->lock, flags);
+		spin_unlock_irqrestore(sch->lock, flags);
 		return -EAGAIN;
 	}
 	event = css_get_subchannel_status(sch);
@@ -237,9 +241,9 @@ static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
 	/* Ask driver what to do with device. */
 	action = UNREGISTER;
 	if (sch->driver && sch->driver->notify) {
-		spin_unlock_irqrestore(&sch->lock, flags);
+		spin_unlock_irqrestore(sch->lock, flags);
 		ret = sch->driver->notify(&sch->dev, event);
-		spin_lock_irqsave(&sch->lock, flags);
+		spin_lock_irqsave(sch->lock, flags);
 		if (ret)
 			action = NONE;
 	}
@@ -264,9 +268,9 @@ static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
 	case UNREGISTER:
 	case UNREGISTER_PROBE:
 		/* Unregister device (will use subchannel lock). */
-		spin_unlock_irqrestore(&sch->lock, flags);
+		spin_unlock_irqrestore(sch->lock, flags);
 		css_sch_device_unregister(sch);
-		spin_lock_irqsave(&sch->lock, flags);
+		spin_lock_irqsave(sch->lock, flags);
 
 		/* Reset intparm to zeroes. */
 		sch->schib.pmcw.intparm = 0;
@@ -278,7 +282,7 @@ static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
 	default:
 		break;
 	}
-	spin_unlock_irqrestore(&sch->lock, flags);
+	spin_unlock_irqrestore(sch->lock, flags);
 	/* Probe if necessary. */
 	if (action == UNREGISTER_PROBE)
 		ret = css_probe_device(sch->schid);
@@ -573,12 +577,24 @@ css_cm_enable_store(struct device *dev, struct device_attribute *attr,
 
 static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store);
 
-static inline void __init
-setup_css(int nr)
+static inline int __init setup_css(int nr)
 {
 	u32 tod_high;
+	int ret;
 
 	memset(css[nr], 0, sizeof(struct channel_subsystem));
+	css[nr]->pseudo_subchannel =
+		kzalloc(sizeof(*css[nr]->pseudo_subchannel), GFP_KERNEL);
+	if (!css[nr]->pseudo_subchannel)
+		return -ENOMEM;
+	css[nr]->pseudo_subchannel->dev.parent = &css[nr]->device;
+	css[nr]->pseudo_subchannel->dev.release = css_subchannel_release;
+	sprintf(css[nr]->pseudo_subchannel->dev.bus_id, "defunct");
+	ret = cio_create_sch_lock(css[nr]->pseudo_subchannel);
+	if (ret) {
+		kfree(css[nr]->pseudo_subchannel);
+		return ret;
+	}
 	mutex_init(&css[nr]->mutex);
 	css[nr]->valid = 1;
 	css[nr]->cssid = nr;
@@ -586,6 +602,7 @@ setup_css(int nr)
 	css[nr]->device.release = channel_subsystem_release;
 	tod_high = (u32) (get_clock() >> 32);
 	css_generate_pgid(css[nr], tod_high);
+	return 0;
 }
 
 /*
@@ -622,10 +639,12 @@ init_channel_subsystem (void)
 			ret = -ENOMEM;
 			goto out_unregister;
 		}
-		setup_css(i);
-		ret = device_register(&css[i]->device);
+		ret = setup_css(i);
 		if (ret)
 			goto out_free;
+		ret = device_register(&css[i]->device);
+		if (ret)
+			goto out_free_all;
 		if (css_characteristics_avail &&
 		    css_chsc_characteristics.secm) {
 			ret = device_create_file(&css[i]->device,
@@ -633,6 +652,9 @@ init_channel_subsystem (void)
 			if (ret)
 				goto out_device;
 		}
+		ret = device_register(&css[i]->pseudo_subchannel->dev);
+		if (ret)
+			goto out_file;
 	}
 	css_init_done = 1;
 
@@ -640,13 +662,19 @@ init_channel_subsystem (void)
 
 	for_each_subchannel(__init_channel_subsystem, NULL);
 	return 0;
+out_file:
+	device_remove_file(&css[i]->device, &dev_attr_cm_enable);
 out_device:
 	device_unregister(&css[i]->device);
+out_free_all:
+	kfree(css[i]->pseudo_subchannel->lock);
+	kfree(css[i]->pseudo_subchannel);
 out_free:
 	kfree(css[i]);
 out_unregister:
 	while (i > 0) {
 		i--;
+		device_unregister(&css[i]->pseudo_subchannel->dev);
 		if (css_characteristics_avail && css_chsc_characteristics.secm)
 			device_remove_file(&css[i]->device,
 					   &dev_attr_cm_enable);
@@ -658,6 +686,11 @@ out:
 	return ret;
 }
 
+int sch_is_pseudo_sch(struct subchannel *sch)
+{
+	return sch == to_css(sch->dev.parent)->pseudo_subchannel;
+}
+
 /*
  * find a driver for a subchannel. They identify by the subchannel
  * type with the exception that the console subchannel driver has its own
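The lock handling above is the subchannel lock conversion: sch->lock becomes a pointer to a separately allocated spinlock (freed in css_subchannel_release()), so a ccw device that is later moved to another subchannel can keep using a stable lock pointer. cio_create_sch_lock() itself is outside this excerpt; a minimal sketch of the shape such a helper presumably has:

static int cio_create_sch_lock(struct subchannel *sch)
{
	/* Sketch only: the real helper is not part of this excerpt. */
	sch->lock = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
	if (!sch->lock)
		return -ENOMEM;
	spin_lock_init(sch->lock);
	return 0;
}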
@@ -73,6 +73,8 @@ struct senseid {
 } __attribute__ ((packed,aligned(4)));
 
 struct ccw_device_private {
+	struct ccw_device *cdev;
+	struct subchannel *sch;
 	int state;		/* device state */
 	atomic_t onoff;
 	unsigned long registered;
@@ -158,6 +160,8 @@ struct channel_subsystem {
 	int cm_enabled;
 	void *cub_addr1;
 	void *cub_addr2;
+	/* for orphaned ccw devices */
+	struct subchannel *pseudo_subchannel;
 };
 #define to_css(dev) container_of(dev, struct channel_subsystem, device)
 
@@ -185,6 +189,11 @@ void css_clear_subchannel_slow_list(void);
 int css_slow_subchannels_exist(void);
 extern int need_rescan;
 
+int sch_is_pseudo_sch(struct subchannel *);
+
 extern struct workqueue_struct *slow_path_wq;
 extern struct work_struct slow_path_work;
+
+int subchannel_add_files (struct device *);
+extern struct attribute_group *subch_attr_groups[];
 #endif
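The new cdev backpointer (and the sch pointer for pending moves) exists because the reworked workqueue callbacks receive only a struct work_struct *; every kick_work handler recovers its device with container_of(). A minimal sketch of the recurring pattern, assuming the declarations above:

static void example_kick_handler(struct work_struct *work)
{
	/* kick_work is embedded in ccw_device_private, so container_of()
	 * recovers the private data; the new cdev member then yields the
	 * device itself. */
	struct ccw_device_private *priv =
		container_of(work, struct ccw_device_private, kick_work);
	struct ccw_device *cdev = priv->cdev;

	/* ... act on cdev ... */
}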
@@ -23,6 +23,7 @@
 #include <asm/param.h>		/* HZ */
 
 #include "cio.h"
 #include "cio_debug.h"
 #include "css.h"
 #include "device.h"
+#include "ioasm.h"
@@ -234,9 +235,11 @@ chpids_show (struct device * dev, struct device_attribute *attr, char * buf)
 	ssize_t ret = 0;
 	int chp;
 
-	for (chp = 0; chp < 8; chp++)
-		ret += sprintf (buf+ret, "%02x ", ssd->chpid[chp]);
-
+	if (ssd)
+		for (chp = 0; chp < 8; chp++)
+			ret += sprintf (buf+ret, "%02x ", ssd->chpid[chp]);
+	else
+		ret += sprintf (buf, "n/a");
 	ret += sprintf (buf+ret, "\n");
 	return min((ssize_t)PAGE_SIZE, ret);
 }
@@ -294,14 +297,44 @@ online_show (struct device *dev, struct device_attribute *attr, char *buf)
 	return sprintf(buf, cdev->online ? "1\n" : "0\n");
 }
 
+int ccw_device_is_orphan(struct ccw_device *cdev)
+{
+	return sch_is_pseudo_sch(to_subchannel(cdev->dev.parent));
+}
+
+static void ccw_device_unregister(struct work_struct *work)
+{
+	struct ccw_device_private *priv;
+	struct ccw_device *cdev;
+
+	priv = container_of(work, struct ccw_device_private, kick_work);
+	cdev = priv->cdev;
+	if (test_and_clear_bit(1, &cdev->private->registered))
+		device_unregister(&cdev->dev);
+	put_device(&cdev->dev);
+}
+
 static void
 ccw_device_remove_disconnected(struct ccw_device *cdev)
 {
 	struct subchannel *sch;
+	unsigned long flags;
 	/*
 	 * Forced offline in disconnected state means
 	 * 'throw away device'.
 	 */
+	if (ccw_device_is_orphan(cdev)) {
+		/* Deregister ccw device. */
+		spin_lock_irqsave(cdev->ccwlock, flags);
+		cdev->private->state = DEV_STATE_NOT_OPER;
+		spin_unlock_irqrestore(cdev->ccwlock, flags);
+		if (get_device(&cdev->dev)) {
+			PREPARE_WORK(&cdev->private->kick_work,
+				     ccw_device_unregister);
+			queue_work(ccw_device_work, &cdev->private->kick_work);
+		}
+		return ;
+	}
 	sch = to_subchannel(cdev->dev.parent);
 	css_sch_device_unregister(sch);
 	/* Reset intparm to zeroes. */
@@ -462,6 +495,8 @@ available_show (struct device *dev, struct device_attribute *attr, char *buf)
 	struct ccw_device *cdev = to_ccwdev(dev);
 	struct subchannel *sch;
 
+	if (ccw_device_is_orphan(cdev))
+		return sprintf(buf, "no device\n");
 	switch (cdev->private->state) {
 	case DEV_STATE_BOXED:
 		return sprintf(buf, "boxed\n");
@@ -498,11 +533,10 @@ static struct attribute_group subch_attr_group = {
 	.attrs = subch_attrs,
 };
 
-static inline int
-subchannel_add_files (struct device *dev)
-{
-	return sysfs_create_group(&dev->kobj, &subch_attr_group);
-}
+struct attribute_group *subch_attr_groups[] = {
+	&subch_attr_group,
+	NULL,
+};
 
 static struct attribute * ccwdev_attrs[] = {
 	&dev_attr_devtype.attr,
@@ -563,11 +597,10 @@ match_devno(struct device * dev, void * data)
 
 	cdev = to_ccwdev(dev);
 	if ((cdev->private->state == DEV_STATE_DISCONNECTED) &&
+	    !ccw_device_is_orphan(cdev) &&
 	    ccw_dev_id_is_equal(&cdev->private->dev_id, &d->dev_id) &&
-	    (cdev != d->sibling)) {
-		cdev->private->state = DEV_STATE_NOT_OPER;
+	    (cdev != d->sibling))
 		return 1;
-	}
 	return 0;
 }
@@ -584,13 +617,36 @@ static struct ccw_device * get_disc_ccwdev_by_dev_id(struct ccw_dev_id *dev_id,
 	return dev ? to_ccwdev(dev) : NULL;
 }
 
-static void
-ccw_device_add_changed(void *data)
+static int match_orphan(struct device *dev, void *data)
 {
+	struct ccw_dev_id *dev_id;
 	struct ccw_device *cdev;
 
-	cdev = data;
+	dev_id = data;
+	cdev = to_ccwdev(dev);
+	return ccw_dev_id_is_equal(&cdev->private->dev_id, dev_id);
+}
+
+static struct ccw_device *
+get_orphaned_ccwdev_by_dev_id(struct channel_subsystem *css,
+			      struct ccw_dev_id *dev_id)
+{
+	struct device *dev;
+
+	dev = device_find_child(&css->pseudo_subchannel->dev, dev_id,
+				match_orphan);
+
+	return dev ? to_ccwdev(dev) : NULL;
+}
+
+static void
+ccw_device_add_changed(struct work_struct *work)
+{
+	struct ccw_device_private *priv;
+	struct ccw_device *cdev;
+
+	priv = container_of(work, struct ccw_device_private, kick_work);
+	cdev = priv->cdev;
 	if (device_add(&cdev->dev)) {
 		put_device(&cdev->dev);
 		return;
@@ -602,64 +658,21 @@ ccw_device_add_changed(void *data)
 	}
 }
 
-extern int css_get_ssd_info(struct subchannel *sch);
-
-void
-ccw_device_do_unreg_rereg(void *data)
+void ccw_device_do_unreg_rereg(struct work_struct *work)
 {
+	struct ccw_device_private *priv;
 	struct ccw_device *cdev;
 	struct subchannel *sch;
-	int need_rename;
 
-	cdev = data;
+	priv = container_of(work, struct ccw_device_private, kick_work);
+	cdev = priv->cdev;
 	sch = to_subchannel(cdev->dev.parent);
-	if (cdev->private->dev_id.devno != sch->schib.pmcw.dev) {
-		/*
-		 * The device number has changed. This is usually only when
-		 * a device has been detached under VM and then re-appeared
-		 * on another subchannel because of a different attachment
-		 * order than before. Ideally, we should should just switch
-		 * subchannels, but unfortunately, this is not possible with
-		 * the current implementation.
-		 * Instead, we search for the old subchannel for this device
-		 * number and deregister so there are no collisions with the
-		 * newly registered ccw_device.
-		 * FIXME: Find another solution so the block layer doesn't
-		 *        get possibly sick...
-		 */
-		struct ccw_device *other_cdev;
-		struct ccw_dev_id dev_id;
-
-		need_rename = 1;
-		dev_id.devno = sch->schib.pmcw.dev;
-		dev_id.ssid = sch->schid.ssid;
-		other_cdev = get_disc_ccwdev_by_dev_id(&dev_id, cdev);
-		if (other_cdev) {
-			struct subchannel *other_sch;
-
-			other_sch = to_subchannel(other_cdev->dev.parent);
-			if (get_device(&other_sch->dev)) {
-				stsch(other_sch->schid, &other_sch->schib);
-				if (other_sch->schib.pmcw.dnv) {
-					other_sch->schib.pmcw.intparm = 0;
-					cio_modify(other_sch);
-				}
-				css_sch_device_unregister(other_sch);
-			}
-		}
-		/* Update ssd info here. */
-		css_get_ssd_info(sch);
-		cdev->private->dev_id.devno = sch->schib.pmcw.dev;
-	} else
-		need_rename = 0;
-	device_remove_files(&cdev->dev);
 	if (test_and_clear_bit(1, &cdev->private->registered))
 		device_del(&cdev->dev);
-	if (need_rename)
-		snprintf (cdev->dev.bus_id, BUS_ID_SIZE, "0.%x.%04x",
-			  sch->schid.ssid, sch->schib.pmcw.dev);
 	PREPARE_WORK(&cdev->private->kick_work,
-		     ccw_device_add_changed, cdev);
+		     ccw_device_add_changed);
 	queue_work(ccw_device_work, &cdev->private->kick_work);
 }
@@ -673,18 +686,194 @@ ccw_device_release(struct device *dev)
 	kfree(cdev);
 }
 
+static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch)
+{
+	struct ccw_device *cdev;
+
+	cdev  = kzalloc(sizeof(*cdev), GFP_KERNEL);
+	if (cdev) {
+		cdev->private = kzalloc(sizeof(struct ccw_device_private),
+					GFP_KERNEL | GFP_DMA);
+		if (cdev->private)
+			return cdev;
+	}
+	kfree(cdev);
+	return ERR_PTR(-ENOMEM);
+}
+
+static int io_subchannel_initialize_dev(struct subchannel *sch,
+					struct ccw_device *cdev)
+{
+	cdev->private->cdev = cdev;
+	atomic_set(&cdev->private->onoff, 0);
+	cdev->dev.parent = &sch->dev;
+	cdev->dev.release = ccw_device_release;
+	INIT_LIST_HEAD(&cdev->private->kick_work.entry);
+	/* Do first half of device_register. */
+	device_initialize(&cdev->dev);
+	if (!get_device(&sch->dev)) {
+		if (cdev->dev.release)
+			cdev->dev.release(&cdev->dev);
+		return -ENODEV;
+	}
+	return 0;
+}
+
+static struct ccw_device * io_subchannel_create_ccwdev(struct subchannel *sch)
+{
+	struct ccw_device *cdev;
+	int ret;
+
+	cdev = io_subchannel_allocate_dev(sch);
+	if (!IS_ERR(cdev)) {
+		ret = io_subchannel_initialize_dev(sch, cdev);
+		if (ret) {
+			kfree(cdev);
+			cdev = ERR_PTR(ret);
+		}
+	}
+	return cdev;
+}
+
+static int io_subchannel_recog(struct ccw_device *, struct subchannel *);
+
+static void sch_attach_device(struct subchannel *sch,
+			      struct ccw_device *cdev)
+{
+	spin_lock_irq(sch->lock);
+	sch->dev.driver_data = cdev;
+	cdev->private->schid = sch->schid;
+	cdev->ccwlock = sch->lock;
+	device_trigger_reprobe(sch);
+	spin_unlock_irq(sch->lock);
+}
+
+static void sch_attach_disconnected_device(struct subchannel *sch,
+					   struct ccw_device *cdev)
+{
+	struct subchannel *other_sch;
+	int ret;
+
+	other_sch = to_subchannel(get_device(cdev->dev.parent));
+	ret = device_move(&cdev->dev, &sch->dev);
+	if (ret) {
+		CIO_MSG_EVENT(2, "Moving disconnected device 0.%x.%04x failed "
+			      "(ret=%d)!\n", cdev->private->dev_id.ssid,
+			      cdev->private->dev_id.devno, ret);
+		put_device(&other_sch->dev);
+		return;
+	}
+	other_sch->dev.driver_data = NULL;
+	/* No need to keep a subchannel without ccw device around. */
+	css_sch_device_unregister(other_sch);
+	put_device(&other_sch->dev);
+	sch_attach_device(sch, cdev);
+}
+
+static void sch_attach_orphaned_device(struct subchannel *sch,
+				       struct ccw_device *cdev)
+{
+	int ret;
+
+	/* Try to move the ccw device to its new subchannel. */
+	ret = device_move(&cdev->dev, &sch->dev);
+	if (ret) {
+		CIO_MSG_EVENT(0, "Moving device 0.%x.%04x from orphanage "
+			      "failed (ret=%d)!\n",
+			      cdev->private->dev_id.ssid,
+			      cdev->private->dev_id.devno, ret);
+		return;
+	}
+	sch_attach_device(sch, cdev);
+}
+
+static void sch_create_and_recog_new_device(struct subchannel *sch)
+{
+	struct ccw_device *cdev;
+
+	/* Need to allocate a new ccw device. */
+	cdev = io_subchannel_create_ccwdev(sch);
+	if (IS_ERR(cdev)) {
+		/* OK, we did everything we could... */
+		css_sch_device_unregister(sch);
+		return;
+	}
+	spin_lock_irq(sch->lock);
+	sch->dev.driver_data = cdev;
+	spin_unlock_irq(sch->lock);
+	/* Start recognition for the new ccw device. */
+	if (io_subchannel_recog(cdev, sch)) {
+		spin_lock_irq(sch->lock);
+		sch->dev.driver_data = NULL;
+		spin_unlock_irq(sch->lock);
+		if (cdev->dev.release)
+			cdev->dev.release(&cdev->dev);
+		css_sch_device_unregister(sch);
+	}
+}
+
+
+void ccw_device_move_to_orphanage(struct work_struct *work)
+{
+	struct ccw_device_private *priv;
+	struct ccw_device *cdev;
+	struct ccw_device *replacing_cdev;
+	struct subchannel *sch;
+	int ret;
+	struct channel_subsystem *css;
+	struct ccw_dev_id dev_id;
+
+	priv = container_of(work, struct ccw_device_private, kick_work);
+	cdev = priv->cdev;
+	sch = to_subchannel(cdev->dev.parent);
+	css = to_css(sch->dev.parent);
+	dev_id.devno = sch->schib.pmcw.dev;
+	dev_id.ssid = sch->schid.ssid;
+
+	/*
+	 * Move the orphaned ccw device to the orphanage so the replacing
+	 * ccw device can take its place on the subchannel.
+	 */
+	ret = device_move(&cdev->dev, &css->pseudo_subchannel->dev);
+	if (ret) {
+		CIO_MSG_EVENT(0, "Moving device 0.%x.%04x to orphanage failed "
+			      "(ret=%d)!\n", cdev->private->dev_id.ssid,
+			      cdev->private->dev_id.devno, ret);
+		return;
+	}
+	cdev->ccwlock = css->pseudo_subchannel->lock;
+	/*
+	 * Search for the replacing ccw device
+	 * - among the disconnected devices
+	 * - in the orphanage
+	 */
+	replacing_cdev = get_disc_ccwdev_by_dev_id(&dev_id, cdev);
+	if (replacing_cdev) {
+		sch_attach_disconnected_device(sch, replacing_cdev);
+		return;
+	}
+	replacing_cdev = get_orphaned_ccwdev_by_dev_id(css, &dev_id);
+	if (replacing_cdev) {
+		sch_attach_orphaned_device(sch, replacing_cdev);
+		return;
+	}
+	sch_create_and_recog_new_device(sch);
+}
+
 /*
  * Register recognized device.
  */
 static void
-io_subchannel_register(void *data)
+io_subchannel_register(struct work_struct *work)
 {
+	struct ccw_device_private *priv;
 	struct ccw_device *cdev;
 	struct subchannel *sch;
 	int ret;
 	unsigned long flags;
 
-	cdev = data;
+	priv = container_of(work, struct ccw_device_private, kick_work);
+	cdev = priv->cdev;
 	sch = to_subchannel(cdev->dev.parent);
 
 	/*
@@ -709,9 +898,9 @@ io_subchannel_register(void *data)
 		printk (KERN_WARNING "%s: could not register %s\n",
 			__func__, cdev->dev.bus_id);
 		put_device(&cdev->dev);
-		spin_lock_irqsave(&sch->lock, flags);
+		spin_lock_irqsave(sch->lock, flags);
 		sch->dev.driver_data = NULL;
-		spin_unlock_irqrestore(&sch->lock, flags);
+		spin_unlock_irqrestore(sch->lock, flags);
 		kfree (cdev->private);
 		kfree (cdev);
 		put_device(&sch->dev);
@@ -719,11 +908,6 @@ io_subchannel_register(void *data)
 			wake_up(&ccw_device_init_wq);
 		return;
 	}
-
-	ret = subchannel_add_files(cdev->dev.parent);
-	if (ret)
-		printk(KERN_WARNING "%s: could not add attributes to %s\n",
-		       __func__, sch->dev.bus_id);
 	put_device(&cdev->dev);
 out:
 	cdev->private->flags.recog_done = 1;
@@ -734,11 +918,14 @@ out:
 }
 
 void
-ccw_device_call_sch_unregister(void *data)
+ccw_device_call_sch_unregister(struct work_struct *work)
 {
-	struct ccw_device *cdev = data;
+	struct ccw_device_private *priv;
+	struct ccw_device *cdev;
 	struct subchannel *sch;
 
+	priv = container_of(work, struct ccw_device_private, kick_work);
+	cdev = priv->cdev;
 	sch = to_subchannel(cdev->dev.parent);
 	css_sch_device_unregister(sch);
 	/* Reset intparm to zeroes. */
@@ -768,7 +955,7 @@ io_subchannel_recog_done(struct ccw_device *cdev)
 			break;
 		sch = to_subchannel(cdev->dev.parent);
 		PREPARE_WORK(&cdev->private->kick_work,
-			     ccw_device_call_sch_unregister, cdev);
+			     ccw_device_call_sch_unregister);
 		queue_work(slow_path_wq, &cdev->private->kick_work);
 		if (atomic_dec_and_test(&ccw_device_init_count))
 			wake_up(&ccw_device_init_wq);
@@ -783,7 +970,7 @@ io_subchannel_recog_done(struct ccw_device *cdev)
 		if (!get_device(&cdev->dev))
 			break;
 		PREPARE_WORK(&cdev->private->kick_work,
-			     io_subchannel_register, cdev);
+			     io_subchannel_register);
 		queue_work(slow_path_wq, &cdev->private->kick_work);
 		break;
 	}
@@ -797,7 +984,7 @@ io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
 
 	sch->dev.driver_data = cdev;
 	sch->driver = &io_subchannel_driver;
-	cdev->ccwlock = &sch->lock;
+	cdev->ccwlock = sch->lock;
 
 	/* Init private data. */
 	priv = cdev->private;
@@ -817,9 +1004,9 @@ io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
 	atomic_inc(&ccw_device_init_count);
 
 	/* Start async. device sensing. */
-	spin_lock_irq(&sch->lock);
+	spin_lock_irq(sch->lock);
 	rc = ccw_device_recognition(cdev);
-	spin_unlock_irq(&sch->lock);
+	spin_unlock_irq(sch->lock);
 	if (rc) {
 		if (atomic_dec_and_test(&ccw_device_init_count))
 			wake_up(&ccw_device_init_wq);
@@ -827,12 +1014,55 @@ io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
 	return rc;
 }
 
+static void ccw_device_move_to_sch(struct work_struct *work)
+{
+	struct ccw_device_private *priv;
+	int rc;
+	struct subchannel *sch;
+	struct ccw_device *cdev;
+	struct subchannel *former_parent;
+
+	priv = container_of(work, struct ccw_device_private, kick_work);
+	sch = priv->sch;
+	cdev = priv->cdev;
+	former_parent = ccw_device_is_orphan(cdev) ?
+		NULL : to_subchannel(get_device(cdev->dev.parent));
+	mutex_lock(&sch->reg_mutex);
+	/* Try to move the ccw device to its new subchannel. */
+	rc = device_move(&cdev->dev, &sch->dev);
+	mutex_unlock(&sch->reg_mutex);
+	if (rc) {
+		CIO_MSG_EVENT(2, "Moving device 0.%x.%04x to subchannel "
+			      "0.%x.%04x failed (ret=%d)!\n",
+			      cdev->private->dev_id.ssid,
+			      cdev->private->dev_id.devno, sch->schid.ssid,
+			      sch->schid.sch_no, rc);
+		css_sch_device_unregister(sch);
+		goto out;
+	}
+	if (former_parent) {
+		spin_lock_irq(former_parent->lock);
+		former_parent->dev.driver_data = NULL;
+		spin_unlock_irq(former_parent->lock);
+		css_sch_device_unregister(former_parent);
+		/* Reset intparm to zeroes. */
+		former_parent->schib.pmcw.intparm = 0;
+		cio_modify(former_parent);
+	}
+	sch_attach_device(sch, cdev);
+out:
+	if (former_parent)
+		put_device(&former_parent->dev);
+	put_device(&cdev->dev);
+}
+
 static int
 io_subchannel_probe (struct subchannel *sch)
 {
 	struct ccw_device *cdev;
 	int rc;
 	unsigned long flags;
+	struct ccw_dev_id dev_id;
 
 	if (sch->dev.driver_data) {
 		/*
@@ -843,7 +1073,6 @@ io_subchannel_probe (struct subchannel *sch)
 		cdev = sch->dev.driver_data;
 		device_initialize(&cdev->dev);
 		ccw_device_register(cdev);
-		subchannel_add_files(&sch->dev);
 		/*
 		 * Check if the device is already online. If it is
 		 * the reference count needs to be corrected
@@ -856,33 +1085,37 @@ io_subchannel_probe (struct subchannel *sch)
 		get_device(&cdev->dev);
 		return 0;
 	}
-	cdev = kzalloc (sizeof(*cdev), GFP_KERNEL);
+	/*
+	 * First check if a fitting device may be found amongst the
+	 * disconnected devices or in the orphanage.
+	 */
+	dev_id.devno = sch->schib.pmcw.dev;
+	dev_id.ssid = sch->schid.ssid;
+	cdev = get_disc_ccwdev_by_dev_id(&dev_id, NULL);
 	if (!cdev)
-		return -ENOMEM;
-	cdev->private = kzalloc(sizeof(struct ccw_device_private),
-				GFP_KERNEL | GFP_DMA);
-	if (!cdev->private) {
-		kfree(cdev);
-		return -ENOMEM;
-	}
-	atomic_set(&cdev->private->onoff, 0);
-	cdev->dev.parent = &sch->dev;
-	cdev->dev.release = ccw_device_release;
-	INIT_LIST_HEAD(&cdev->private->kick_work.entry);
-	/* Do first half of device_register. */
-	device_initialize(&cdev->dev);
-
-	if (!get_device(&sch->dev)) {
-		if (cdev->dev.release)
-			cdev->dev.release(&cdev->dev);
-		return -ENODEV;
-	}
+		cdev = get_orphaned_ccwdev_by_dev_id(to_css(sch->dev.parent),
+						     &dev_id);
+	if (cdev) {
+		/*
+		 * Schedule moving the device until when we have a registered
+		 * subchannel to move to and succeed the probe. We can
+		 * unregister later again, when the probe is through.
+		 */
+		cdev->private->sch = sch;
+		PREPARE_WORK(&cdev->private->kick_work,
+			     ccw_device_move_to_sch);
+		queue_work(slow_path_wq, &cdev->private->kick_work);
+		return 0;
+	}
+	cdev = io_subchannel_create_ccwdev(sch);
+	if (IS_ERR(cdev))
+		return PTR_ERR(cdev);
 
 	rc = io_subchannel_recog(cdev, sch);
 	if (rc) {
-		spin_lock_irqsave(&sch->lock, flags);
+		spin_lock_irqsave(sch->lock, flags);
 		sch->dev.driver_data = NULL;
-		spin_unlock_irqrestore(&sch->lock, flags);
+		spin_unlock_irqrestore(sch->lock, flags);
 		if (cdev->dev.release)
 			cdev->dev.release(&cdev->dev);
 	}
@@ -890,17 +1123,6 @@ io_subchannel_probe (struct subchannel *sch)
 	return rc;
 }
 
-static void
-ccw_device_unregister(void *data)
-{
-	struct ccw_device *cdev;
-
-	cdev = (struct ccw_device *)data;
-	if (test_and_clear_bit(1, &cdev->private->registered))
-		device_unregister(&cdev->dev);
-	put_device(&cdev->dev);
-}
-
 static int
 io_subchannel_remove (struct subchannel *sch)
 {
@@ -921,7 +1143,7 @@ io_subchannel_remove (struct subchannel *sch)
 	 */
 	if (get_device(&cdev->dev)) {
 		PREPARE_WORK(&cdev->private->kick_work,
-			     ccw_device_unregister, cdev);
+			     ccw_device_unregister);
 		queue_work(ccw_device_work, &cdev->private->kick_work);
 	}
 	return 0;
@@ -1003,6 +1225,13 @@ static struct ccw_device console_cdev;
 static struct ccw_device_private console_private;
 static int console_cdev_in_use;
 
+static DEFINE_SPINLOCK(ccw_console_lock);
+
+spinlock_t * cio_get_console_lock(void)
+{
+	return &ccw_console_lock;
+}
+
 static int
 ccw_device_console_enable (struct ccw_device *cdev, struct subchannel *sch)
 {
@@ -1048,6 +1277,7 @@ ccw_device_probe_console(void)
 	memset(&console_cdev, 0, sizeof(struct ccw_device));
 	memset(&console_private, 0, sizeof(struct ccw_device_private));
 	console_cdev.private = &console_private;
+	console_private.cdev = &console_cdev;
 	ret = ccw_device_console_enable(&console_cdev, sch);
 	if (ret) {
 		cio_release_console();
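A pattern worth noting in the hunks above is how teardown is pushed out of atomic context: the caller pins the device with get_device(), queues kick_work, and the handler drops the reference when it has run. A sketch of the enqueue side, assuming the ccw_device_work queue and the ccw_device_unregister() handler shown above:

static void example_schedule_unregister(struct ccw_device *cdev)
{
	if (!get_device(&cdev->dev))
		return;	/* device is already going away */
	/* ccw_device_unregister() calls put_device() when it is done. */
	PREPARE_WORK(&cdev->private->kick_work, ccw_device_unregister);
	queue_work(ccw_device_work, &cdev->private->kick_work);
}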
@@ -78,8 +78,10 @@ void io_subchannel_recog_done(struct ccw_device *cdev);
 
 int ccw_device_cancel_halt_clear(struct ccw_device *);
 
-void ccw_device_do_unreg_rereg(void *);
-void ccw_device_call_sch_unregister(void *);
+void ccw_device_do_unreg_rereg(struct work_struct *);
+void ccw_device_call_sch_unregister(struct work_struct *);
+void ccw_device_move_to_orphanage(struct work_struct *);
+int ccw_device_is_orphan(struct ccw_device *);
 
 int ccw_device_recognition(struct ccw_device *);
 int ccw_device_online(struct ccw_device *);
@@ -186,15 +186,14 @@ ccw_device_handle_oper(struct ccw_device *cdev)
 	/*
 	 * Check if cu type and device type still match. If
 	 * not, it is certainly another device and we have to
-	 * de- and re-register. Also check here for non-matching devno.
+	 * de- and re-register.
 	 */
 	if (cdev->id.cu_type != cdev->private->senseid.cu_type ||
 	    cdev->id.cu_model != cdev->private->senseid.cu_model ||
 	    cdev->id.dev_type != cdev->private->senseid.dev_type ||
-	    cdev->id.dev_model != cdev->private->senseid.dev_model ||
-	    cdev->private->dev_id.devno != sch->schib.pmcw.dev) {
+	    cdev->id.dev_model != cdev->private->senseid.dev_model) {
 		PREPARE_WORK(&cdev->private->kick_work,
-			     ccw_device_do_unreg_rereg, cdev);
+			     ccw_device_do_unreg_rereg);
 		queue_work(ccw_device_work, &cdev->private->kick_work);
 		return 0;
 	}
@@ -329,19 +328,21 @@ ccw_device_sense_id_done(struct ccw_device *cdev, int err)
 }
 
 static void
-ccw_device_oper_notify(void *data)
+ccw_device_oper_notify(struct work_struct *work)
 {
+	struct ccw_device_private *priv;
 	struct ccw_device *cdev;
 	struct subchannel *sch;
 	int ret;
 
-	cdev = data;
+	priv = container_of(work, struct ccw_device_private, kick_work);
+	cdev = priv->cdev;
 	sch = to_subchannel(cdev->dev.parent);
 	ret = (sch->driver && sch->driver->notify) ?
 		sch->driver->notify(&sch->dev, CIO_OPER) : 0;
 	if (!ret)
 		/* Driver doesn't want device back. */
-		ccw_device_do_unreg_rereg(cdev);
+		ccw_device_do_unreg_rereg(work);
 	else {
 		/* Reenable channel measurements, if needed. */
 		cmf_reenable(cdev);
@@ -377,8 +378,7 @@ ccw_device_done(struct ccw_device *cdev, int state)
 
 	if (cdev->private->flags.donotify) {
 		cdev->private->flags.donotify = 0;
-		PREPARE_WORK(&cdev->private->kick_work, ccw_device_oper_notify,
-			     cdev);
+		PREPARE_WORK(&cdev->private->kick_work, ccw_device_oper_notify);
 		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
 	}
 	wake_up(&cdev->private->wait_q);
@@ -528,13 +528,15 @@ ccw_device_recog_timeout(struct ccw_device *cdev, enum dev_event dev_event)
 
 
 static void
-ccw_device_nopath_notify(void *data)
+ccw_device_nopath_notify(struct work_struct *work)
 {
+	struct ccw_device_private *priv;
 	struct ccw_device *cdev;
 	struct subchannel *sch;
 	int ret;
 
-	cdev = data;
+	priv = container_of(work, struct ccw_device_private, kick_work);
+	cdev = priv->cdev;
 	sch = to_subchannel(cdev->dev.parent);
 	/* Extra sanity. */
 	if (sch->lpm)
@@ -547,8 +549,7 @@ ccw_device_nopath_notify(void *data)
 			cio_disable_subchannel(sch);
 		if (get_device(&cdev->dev)) {
 			PREPARE_WORK(&cdev->private->kick_work,
-				     ccw_device_call_sch_unregister,
-				     cdev);
+				     ccw_device_call_sch_unregister);
 			queue_work(ccw_device_work,
 				   &cdev->private->kick_work);
 		} else
@@ -607,7 +608,7 @@ ccw_device_verify_done(struct ccw_device *cdev, int err)
 		/* Reset oper notify indication after verify error. */
 		cdev->private->flags.donotify = 0;
 		PREPARE_WORK(&cdev->private->kick_work,
-			     ccw_device_nopath_notify, cdev);
+			     ccw_device_nopath_notify);
 		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
 		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
 		break;
@@ -674,6 +675,10 @@ ccw_device_offline(struct ccw_device *cdev)
 {
 	struct subchannel *sch;
 
+	if (ccw_device_is_orphan(cdev)) {
+		ccw_device_done(cdev, DEV_STATE_OFFLINE);
+		return 0;
+	}
 	sch = to_subchannel(cdev->dev.parent);
 	if (stsch(sch->schid, &sch->schib) || !sch->schib.pmcw.dnv)
 		return -ENODEV;
@@ -738,7 +743,7 @@ ccw_device_offline_notoper(struct ccw_device *cdev, enum dev_event dev_event)
 	sch = to_subchannel(cdev->dev.parent);
 	if (get_device(&cdev->dev)) {
 		PREPARE_WORK(&cdev->private->kick_work,
-			     ccw_device_call_sch_unregister, cdev);
+			     ccw_device_call_sch_unregister);
 		queue_work(ccw_device_work, &cdev->private->kick_work);
 	}
 	wake_up(&cdev->private->wait_q);
@@ -769,7 +774,7 @@ ccw_device_online_notoper(struct ccw_device *cdev, enum dev_event dev_event)
 	}
 	if (get_device(&cdev->dev)) {
 		PREPARE_WORK(&cdev->private->kick_work,
-			     ccw_device_call_sch_unregister, cdev);
+			     ccw_device_call_sch_unregister);
 		queue_work(ccw_device_work, &cdev->private->kick_work);
 	}
 	wake_up(&cdev->private->wait_q);
@@ -874,7 +879,7 @@ ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
 	sch = to_subchannel(cdev->dev.parent);
 	if (!sch->lpm) {
 		PREPARE_WORK(&cdev->private->kick_work,
-			     ccw_device_nopath_notify, cdev);
+			     ccw_device_nopath_notify);
 		queue_work(ccw_device_notify_work,
 			   &cdev->private->kick_work);
 	} else
@@ -969,7 +974,7 @@ ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
 			      ERR_PTR(-EIO));
 	if (!sch->lpm) {
 		PREPARE_WORK(&cdev->private->kick_work,
-			     ccw_device_nopath_notify, cdev);
+			     ccw_device_nopath_notify);
 		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
 	} else if (cdev->private->flags.doverify)
 		/* Start delayed path verification. */
@@ -992,7 +997,7 @@ ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
 	sch = to_subchannel(cdev->dev.parent);
 	if (!sch->lpm) {
 		PREPARE_WORK(&cdev->private->kick_work,
-			     ccw_device_nopath_notify, cdev);
+			     ccw_device_nopath_notify);
 		queue_work(ccw_device_notify_work,
 			   &cdev->private->kick_work);
 	} else
@@ -1021,7 +1026,7 @@ void device_kill_io(struct subchannel *sch)
 		if (ret == -ENODEV) {
 			if (!sch->lpm) {
 				PREPARE_WORK(&cdev->private->kick_work,
-					     ccw_device_nopath_notify, cdev);
+					     ccw_device_nopath_notify);
 				queue_work(ccw_device_notify_work,
 					   &cdev->private->kick_work);
 			} else
@@ -1033,7 +1038,7 @@ void device_kill_io(struct subchannel *sch)
 			      ERR_PTR(-EIO));
 	if (!sch->lpm) {
 		PREPARE_WORK(&cdev->private->kick_work,
-			     ccw_device_nopath_notify, cdev);
+			     ccw_device_nopath_notify);
 		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
 	} else
 		/* Start delayed path verification. */
@@ -1104,7 +1109,8 @@ device_trigger_reprobe(struct subchannel *sch)
 	/* Update some values. */
 	if (stsch(sch->schid, &sch->schib))
 		return;
-
+	if (!sch->schib.pmcw.dnv)
+		return;
 	/*
 	 * The pim, pam, pom values may not be accurate, but they are the best
 	 * we have before performing device selection :/
@@ -1118,7 +1124,13 @@ device_trigger_reprobe(struct subchannel *sch)
 	sch->schib.pmcw.mp = 1;
 	sch->schib.pmcw.intparm = (__u32)(unsigned long)sch;
 	/* We should also udate ssd info, but this has to wait. */
-	ccw_device_start_id(cdev, 0);
+	/* Check if this is another device which appeared on the same sch. */
+	if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
+		PREPARE_WORK(&cdev->private->kick_work,
+			     ccw_device_move_to_orphanage);
+		queue_work(ccw_device_work, &cdev->private->kick_work);
+	} else
+		ccw_device_start_id(cdev, 0);
 }
 
 static void
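Because all of these handlers recover their device the same way, one handler can tail-call another with the same work pointer, which is exactly what ccw_device_oper_notify() does with ccw_device_do_unreg_rereg() above. A sketch; driver_wants_device_back() is a hypothetical stand-in for the driver notify check:

static void example_oper_notify(struct work_struct *work)
{
	struct ccw_device_private *priv =
		container_of(work, struct ccw_device_private, kick_work);

	/* driver_wants_device_back() is hypothetical; the real code asks
	 * sch->driver->notify(). Passing 'work' straight through is safe
	 * because the callee performs the same container_of(). */
	if (!driver_wants_device_back(priv->cdev))
		ccw_device_do_unreg_rereg(work);
}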
@@ -316,9 +316,9 @@ __ccw_device_retry_loop(struct ccw_device *cdev, struct ccw1 *ccw, long magic, _
 		ccw_device_set_timeout(cdev, 0);
 		if (ret == -EBUSY) {
 			/* Try again later. */
-			spin_unlock_irq(&sch->lock);
+			spin_unlock_irq(sch->lock);
 			msleep(10);
-			spin_lock_irq(&sch->lock);
+			spin_lock_irq(sch->lock);
 			continue;
 		}
 		if (ret != 0)
@@ -326,12 +326,12 @@ __ccw_device_retry_loop(struct ccw_device *cdev, struct ccw1 *ccw, long magic, _
 			break;
 		/* Wait for end of request. */
 		cdev->private->intparm = magic;
-		spin_unlock_irq(&sch->lock);
+		spin_unlock_irq(sch->lock);
 		wait_event(cdev->private->wait_q,
 			   (cdev->private->intparm == -EIO) ||
 			   (cdev->private->intparm == -EAGAIN) ||
 			   (cdev->private->intparm == 0));
-		spin_lock_irq(&sch->lock);
+		spin_lock_irq(sch->lock);
 		/* Check at least for channel end / device end */
 		if (cdev->private->intparm == -EIO) {
 			/* Non-retryable error. */
@@ -342,9 +342,9 @@ __ccw_device_retry_loop(struct ccw_device *cdev, struct ccw1 *ccw, long magic, _
 			/* Success. */
 			break;
 		/* Try again later. */
-		spin_unlock_irq(&sch->lock);
+		spin_unlock_irq(sch->lock);
 		msleep(10);
-		spin_lock_irq(&sch->lock);
+		spin_lock_irq(sch->lock);
 	} while (1);
 
 	return ret;
@@ -389,7 +389,7 @@ read_dev_chars (struct ccw_device *cdev, void **buffer, int length)
 		return ret;
 	}
 
-	spin_lock_irq(&sch->lock);
+	spin_lock_irq(sch->lock);
 	/* Save interrupt handler. */
 	handler = cdev->handler;
 	/* Temporarily install own handler. */
@@ -406,7 +406,7 @@ read_dev_chars (struct ccw_device *cdev, void **buffer, int length)
 
 	/* Restore interrupt handler. */
 	cdev->handler = handler;
-	spin_unlock_irq(&sch->lock);
+	spin_unlock_irq(sch->lock);
 
 	clear_normalized_cda (rdc_ccw);
 	kfree(rdc_ccw);
@@ -463,7 +463,7 @@ read_conf_data_lpm (struct ccw_device *cdev, void **buffer, int *length, __u8 lp
 	rcd_ccw->count = ciw->count;
 	rcd_ccw->flags = CCW_FLAG_SLI;
 
-	spin_lock_irq(&sch->lock);
+	spin_lock_irq(sch->lock);
 	/* Save interrupt handler. */
 	handler = cdev->handler;
 	/* Temporarily install own handler. */
@@ -480,7 +480,7 @@ read_conf_data_lpm (struct ccw_device *cdev, void **buffer, int *length, __u8 lp
 
 	/* Restore interrupt handler. */
 	cdev->handler = handler;
-	spin_unlock_irq(&sch->lock);
+	spin_unlock_irq(sch->lock);
 
 	/*
 	 * on success we update the user input parms
@@ -537,7 +537,7 @@ ccw_device_stlck(struct ccw_device *cdev)
 		kfree(buf);
 		return -ENOMEM;
 	}
-	spin_lock_irqsave(&sch->lock, flags);
+	spin_lock_irqsave(sch->lock, flags);
 	ret = cio_enable_subchannel(sch, 3);
 	if (ret)
 		goto out_unlock;
@@ -559,9 +559,9 @@ ccw_device_stlck(struct ccw_device *cdev)
 		goto out_unlock;
 	}
 	cdev->private->irb.scsw.actl |= SCSW_ACTL_START_PEND;
-	spin_unlock_irqrestore(&sch->lock, flags);
+	spin_unlock_irqrestore(sch->lock, flags);
 	wait_event(cdev->private->wait_q, cdev->private->irb.scsw.actl == 0);
-	spin_lock_irqsave(&sch->lock, flags);
+	spin_lock_irqsave(sch->lock, flags);
 	cio_disable_subchannel(sch); //FIXME: return code?
 	if ((cdev->private->irb.scsw.dstat !=
 	     (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) ||
@@ -572,7 +572,7 @@ ccw_device_stlck(struct ccw_device *cdev)
 out_unlock:
 	kfree(buf);
 	kfree(buf2);
-	spin_unlock_irqrestore(&sch->lock, flags);
+	spin_unlock_irqrestore(sch->lock, flags);
 	return ret;
 }
@@ -46,6 +46,7 @@
 #include <asm/timex.h>
 
 #include <asm/debug.h>
+#include <asm/s390_rdev.h>
 #include <asm/qdio.h>
 
 #include "cio.h"
@@ -65,12 +66,12 @@ MODULE_LICENSE("GPL");
 /******************** HERE WE GO ***********************************/
 
 static const char version[] = "QDIO base support version 2";
+extern struct bus_type ccw_bus_type;
 
-#ifdef QDIO_PERFORMANCE_STATS
+static int qdio_performance_stats = 0;
 static int proc_perf_file_registration;
 static unsigned long i_p_c, i_p_nc, o_p_c, o_p_nc, ii_p_c, ii_p_nc;
 static struct qdio_perf_stats perf_stats;
-#endif /* QDIO_PERFORMANCE_STATS */
 
 static int hydra_thinints;
 static int is_passthrough = 0;
@@ -275,9 +276,8 @@ qdio_siga_sync(struct qdio_q *q, unsigned int gpr2,
 	QDIO_DBF_TEXT4(0,trace,"sigasync");
 	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
 
-#ifdef QDIO_PERFORMANCE_STATS
-	perf_stats.siga_syncs++;
-#endif /* QDIO_PERFORMANCE_STATS */
+	if (qdio_performance_stats)
+		perf_stats.siga_syncs++;
 
 	cc = do_siga_sync(q->schid, gpr2, gpr3);
 	if (cc)
@@ -322,9 +322,8 @@ qdio_siga_output(struct qdio_q *q)
 	__u32 busy_bit;
 	__u64 start_time=0;
 
-#ifdef QDIO_PERFORMANCE_STATS
-	perf_stats.siga_outs++;
-#endif /* QDIO_PERFORMANCE_STATS */
+	if (qdio_performance_stats)
+		perf_stats.siga_outs++;
 
 	QDIO_DBF_TEXT4(0,trace,"sigaout");
 	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
@@ -358,9 +357,8 @@ qdio_siga_input(struct qdio_q *q)
 	QDIO_DBF_TEXT4(0,trace,"sigain");
 	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
 
-#ifdef QDIO_PERFORMANCE_STATS
-	perf_stats.siga_ins++;
-#endif /* QDIO_PERFORMANCE_STATS */
+	if (qdio_performance_stats)
+		perf_stats.siga_ins++;
 
 	cc = do_siga_input(q->schid, q->mask);
 
@@ -954,9 +952,8 @@ __qdio_outbound_processing(struct qdio_q *q)
 
 	if (unlikely(qdio_reserve_q(q))) {
 		qdio_release_q(q);
-#ifdef QDIO_PERFORMANCE_STATS
-		o_p_c++;
-#endif /* QDIO_PERFORMANCE_STATS */
+		if (qdio_performance_stats)
+			o_p_c++;
 		/* as we're sissies, we'll check next time */
 		if (likely(!atomic_read(&q->is_in_shutdown))) {
 			qdio_mark_q(q);
@@ -964,10 +961,10 @@ __qdio_outbound_processing(struct qdio_q *q)
 		}
 		return;
 	}
-#ifdef QDIO_PERFORMANCE_STATS
-	o_p_nc++;
-	perf_stats.tl_runs++;
-#endif /* QDIO_PERFORMANCE_STATS */
+	if (qdio_performance_stats) {
+		o_p_nc++;
+		perf_stats.tl_runs++;
+	}
 
 	/* see comment in qdio_kick_outbound_q */
 	siga_attempts=atomic_read(&q->busy_siga_counter);
@@ -1142,15 +1139,16 @@ qdio_has_inbound_q_moved(struct qdio_q *q)
 {
 	int i;
 
-#ifdef QDIO_PERFORMANCE_STATS
 	static int old_pcis=0;
 	static int old_thinints=0;
 
-	if ((old_pcis==perf_stats.pcis)&&(old_thinints==perf_stats.thinints))
-		perf_stats.start_time_inbound=NOW;
-	else
-		old_pcis=perf_stats.pcis;
-#endif /* QDIO_PERFORMANCE_STATS */
+	if (qdio_performance_stats) {
+		if ((old_pcis==perf_stats.pcis)&&
+		    (old_thinints==perf_stats.thinints))
+			perf_stats.start_time_inbound=NOW;
+		else
+			old_pcis=perf_stats.pcis;
+	}
 
 	i=qdio_get_inbound_buffer_frontier(q);
 	if ( (i!=GET_SAVED_FRONTIER(q)) ||
@@ -1340,10 +1338,10 @@ qdio_kick_inbound_handler(struct qdio_q *q)
 	q->siga_error=0;
 	q->error_status_flags=0;
 
-#ifdef QDIO_PERFORMANCE_STATS
-	perf_stats.inbound_time+=NOW-perf_stats.start_time_inbound;
-	perf_stats.inbound_cnt++;
-#endif /* QDIO_PERFORMANCE_STATS */
+	if (qdio_performance_stats) {
+		perf_stats.inbound_time+=NOW-perf_stats.start_time_inbound;
+		perf_stats.inbound_cnt++;
+	}
 }
 
 static inline void
@@ -1363,9 +1361,8 @@ __tiqdio_inbound_processing(struct qdio_q *q, int spare_ind_was_set)
 	 */
 	if (unlikely(qdio_reserve_q(q))) {
 		qdio_release_q(q);
-#ifdef QDIO_PERFORMANCE_STATS
-		ii_p_c++;
-#endif /* QDIO_PERFORMANCE_STATS */
+		if (qdio_performance_stats)
+			ii_p_c++;
 		/*
 		 * as we might just be about to stop polling, we make
 		 * sure that we check again at least once more
@@ -1373,9 +1370,8 @@ __tiqdio_inbound_processing(struct qdio_q *q, int spare_ind_was_set)
 		tiqdio_sched_tl();
 		return;
 	}
-#ifdef QDIO_PERFORMANCE_STATS
-	ii_p_nc++;
-#endif /* QDIO_PERFORMANCE_STATS */
+	if (qdio_performance_stats)
+		ii_p_nc++;
 	if (unlikely(atomic_read(&q->is_in_shutdown))) {
 		qdio_unmark_q(q);
 		goto out;
@@ -1416,11 +1412,11 @@ __tiqdio_inbound_processing(struct qdio_q *q, int spare_ind_was_set)
 		irq_ptr = (struct qdio_irq*)q->irq_ptr;
 		for (i=0;i<irq_ptr->no_output_qs;i++) {
 			oq = irq_ptr->output_qs[i];
-#ifdef QDIO_PERFORMANCE_STATS
-			perf_stats.tl_runs--;
-#endif /* QDIO_PERFORMANCE_STATS */
-			if (!qdio_is_outbound_q_done(oq))
+			if (!qdio_is_outbound_q_done(oq)) {
+				if (qdio_performance_stats)
+					perf_stats.tl_runs--;
 				__qdio_outbound_processing(oq);
+			}
 		}
 	}
 
@@ -1457,9 +1453,8 @@ __qdio_inbound_processing(struct qdio_q *q)
 
 	if (unlikely(qdio_reserve_q(q))) {
 		qdio_release_q(q);
-#ifdef QDIO_PERFORMANCE_STATS
-		i_p_c++;
-#endif /* QDIO_PERFORMANCE_STATS */
+		if (qdio_performance_stats)
+			i_p_c++;
 		/* as we're sissies, we'll check next time */
 		if (likely(!atomic_read(&q->is_in_shutdown))) {
 			qdio_mark_q(q);
@@ -1467,10 +1462,10 @@ __qdio_inbound_processing(struct qdio_q *q)
 		}
 		return;
 	}
-#ifdef QDIO_PERFORMANCE_STATS
-	i_p_nc++;
-	perf_stats.tl_runs++;
-#endif /* QDIO_PERFORMANCE_STATS */
+	if (qdio_performance_stats) {
+		i_p_nc++;
+		perf_stats.tl_runs++;
+	}
 
 again:
 	if (qdio_has_inbound_q_moved(q)) {
@@ -1516,9 +1511,8 @@ tiqdio_reset_processing_state(struct qdio_q *q, int q_laps)
 
 	if (unlikely(qdio_reserve_q(q))) {
 		qdio_release_q(q);
-#ifdef QDIO_PERFORMANCE_STATS
-		ii_p_c++;
-#endif /* QDIO_PERFORMANCE_STATS */
+		if (qdio_performance_stats)
+			ii_p_c++;
 		/*
 		 * as we might just be about to stop polling, we make
 		 * sure that we check again at least once more
@@ -1609,9 +1603,8 @@ tiqdio_tl(unsigned long data)
 {
 	QDIO_DBF_TEXT4(0,trace,"iqdio_tl");
 
-#ifdef QDIO_PERFORMANCE_STATS
-	perf_stats.tl_runs++;
-#endif /* QDIO_PERFORMANCE_STATS */
+	if (qdio_performance_stats)
+		perf_stats.tl_runs++;
 
 	tiqdio_inbound_checks();
 }
@@ -1918,10 +1911,10 @@ tiqdio_thinint_handler(void)
 {
 	QDIO_DBF_TEXT4(0,trace,"thin_int");
 
-#ifdef QDIO_PERFORMANCE_STATS
-	perf_stats.thinints++;
-	perf_stats.start_time_inbound=NOW;
-#endif /* QDIO_PERFORMANCE_STATS */
+	if (qdio_performance_stats) {
+		perf_stats.thinints++;
+		perf_stats.start_time_inbound=NOW;
+	}
 
 	/* SVS only when needed:
 	 * issue SVS to benefit from iqdio interrupt avoidance
@@ -1976,18 +1969,17 @@ qdio_handle_pci(struct qdio_irq *irq_ptr)
 	int i;
 	struct qdio_q *q;
 
-#ifdef QDIO_PERFORMANCE_STATS
-	perf_stats.pcis++;
-	perf_stats.start_time_inbound=NOW;
-#endif /* QDIO_PERFORMANCE_STATS */
+	if (qdio_performance_stats) {
+		perf_stats.pcis++;
+		perf_stats.start_time_inbound=NOW;
+	}
 	for (i=0;i<irq_ptr->no_input_qs;i++) {
 		q=irq_ptr->input_qs[i];
 		if (q->is_input_q&QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT)
 			qdio_mark_q(q);
 		else {
-#ifdef QDIO_PERFORMANCE_STATS
-			perf_stats.tl_runs--;
-#endif /* QDIO_PERFORMANCE_STATS */
+			if (qdio_performance_stats)
+				perf_stats.tl_runs--;
 			__qdio_inbound_processing(q);
 		}
 	}
@@ -1995,11 +1987,10 @@ qdio_handle_pci(struct qdio_irq *irq_ptr)
 		return;
 	for (i=0;i<irq_ptr->no_output_qs;i++) {
 		q=irq_ptr->output_qs[i];
-#ifdef QDIO_PERFORMANCE_STATS
-		perf_stats.tl_runs--;
-#endif /* QDIO_PERFORMANCE_STATS */
 		if (qdio_is_outbound_q_done(q))
 			continue;
+		if (qdio_performance_stats)
+			perf_stats.tl_runs--;
 		if (!irq_ptr->sync_done_on_outb_pcis)
 			SYNC_MEMORY;
 		__qdio_outbound_processing(q);
@@ -2045,11 +2036,13 @@ omit_handler_call:
 }
 
 static void
-qdio_call_shutdown(void *data)
+qdio_call_shutdown(struct work_struct *work)
 {
+	struct ccw_device_private *priv;
 	struct ccw_device *cdev;
 
-	cdev = (struct ccw_device *)data;
+	priv = container_of(work, struct ccw_device_private, kick_work);
+	cdev = priv->cdev;
 	qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
 	put_device(&cdev->dev);
 }
@@ -2091,7 +2084,7 @@ qdio_timeout_handler(struct ccw_device *cdev)
 		if (get_device(&cdev->dev)) {
 			/* Can't call shutdown from interrupt context. */
 			PREPARE_WORK(&cdev->private->kick_work,
-				     qdio_call_shutdown, (void *)cdev);
+				     qdio_call_shutdown);
 			queue_work(ccw_device_work, &cdev->private->kick_work);
 		}
 		break;
|
|||
struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
|
||||
|
||||
/* This is the outbound handling of queues */
|
||||
#ifdef QDIO_PERFORMANCE_STATS
|
||||
perf_stats.start_time_outbound=NOW;
|
||||
#endif /* QDIO_PERFORMANCE_STATS */
|
||||
if (qdio_performance_stats)
|
||||
perf_stats.start_time_outbound=NOW;
|
||||
|
||||
qdio_do_qdio_fill_output(q,qidx,count,buffers);
|
||||
|
||||
used_elements=atomic_add_return(count, &q->number_of_buffers_used) - count;
|
||||
|
||||
if (callflags&QDIO_FLAG_DONT_SIGA) {
|
||||
#ifdef QDIO_PERFORMANCE_STATS
|
||||
perf_stats.outbound_time+=NOW-perf_stats.start_time_outbound;
|
||||
perf_stats.outbound_cnt++;
|
||||
#endif /* QDIO_PERFORMANCE_STATS */
|
||||
if (qdio_performance_stats) {
|
||||
perf_stats.outbound_time+=NOW-perf_stats.start_time_outbound;
|
||||
perf_stats.outbound_cnt++;
|
||||
}
|
||||
return;
|
||||
}
|
||||
if (q->is_iqdio_q) {
|
||||
|
@ -3500,9 +3492,8 @@ do_qdio_handle_outbound(struct qdio_q *q, unsigned int callflags,
|
|||
qdio_kick_outbound_q(q);
|
||||
} else {
|
||||
QDIO_DBF_TEXT3(0,trace, "fast-req");
|
||||
#ifdef QDIO_PERFORMANCE_STATS
|
||||
perf_stats.fast_reqs++;
|
||||
#endif /* QDIO_PERFORMANCE_STATS */
|
||||
if (qdio_performance_stats)
|
||||
perf_stats.fast_reqs++;
|
||||
}
|
||||
}
|
||||
/*
|
||||
|
@ -3513,10 +3504,10 @@ do_qdio_handle_outbound(struct qdio_q *q, unsigned int callflags,
|
|||
__qdio_outbound_processing(q);
|
||||
}
|
||||
|
||||
#ifdef QDIO_PERFORMANCE_STATS
|
||||
perf_stats.outbound_time+=NOW-perf_stats.start_time_outbound;
|
||||
perf_stats.outbound_cnt++;
|
||||
#endif /* QDIO_PERFORMANCE_STATS */
|
||||
if (qdio_performance_stats) {
|
||||
perf_stats.outbound_time+=NOW-perf_stats.start_time_outbound;
|
||||
perf_stats.outbound_cnt++;
|
||||
}
|
||||
}
|
||||
|
||||
/* count must be 1 in iqdio */
|
||||
|
@ -3574,7 +3565,6 @@ do_QDIO(struct ccw_device *cdev,unsigned int callflags,
|
|||
return 0;
|
||||
}
|
||||
|
||||
#ifdef QDIO_PERFORMANCE_STATS
|
||||
static int
|
||||
qdio_perf_procfile_read(char *buffer, char **buffer_location, off_t offset,
|
||||
int buffer_length, int *eof, void *data)
|
||||
|
@ -3590,29 +3580,29 @@ qdio_perf_procfile_read(char *buffer, char **buffer_location, off_t offset,
|
|||
_OUTP_IT("i_p_nc/c=%lu/%lu\n",i_p_nc,i_p_c);
|
||||
_OUTP_IT("ii_p_nc/c=%lu/%lu\n",ii_p_nc,ii_p_c);
|
||||
_OUTP_IT("o_p_nc/c=%lu/%lu\n",o_p_nc,o_p_c);
|
||||
_OUTP_IT("Number of tasklet runs (total) : %u\n",
|
||||
_OUTP_IT("Number of tasklet runs (total) : %lu\n",
|
||||
perf_stats.tl_runs);
|
||||
_OUTP_IT("\n");
|
||||
_OUTP_IT("Number of SIGA sync's issued : %u\n",
|
||||
_OUTP_IT("Number of SIGA sync's issued : %lu\n",
|
||||
perf_stats.siga_syncs);
|
||||
_OUTP_IT("Number of SIGA in's issued : %u\n",
|
||||
_OUTP_IT("Number of SIGA in's issued : %lu\n",
|
||||
perf_stats.siga_ins);
|
||||
_OUTP_IT("Number of SIGA out's issued : %u\n",
|
||||
_OUTP_IT("Number of SIGA out's issued : %lu\n",
|
||||
perf_stats.siga_outs);
|
||||
_OUTP_IT("Number of PCIs caught : %u\n",
|
||||
_OUTP_IT("Number of PCIs caught : %lu\n",
|
||||
perf_stats.pcis);
|
||||
_OUTP_IT("Number of adapter interrupts caught : %u\n",
|
||||
_OUTP_IT("Number of adapter interrupts caught : %lu\n",
|
||||
perf_stats.thinints);
|
||||
_OUTP_IT("Number of fast requeues (outg. SBALs w/o SIGA) : %u\n",
|
||||
_OUTP_IT("Number of fast requeues (outg. SBALs w/o SIGA) : %lu\n",
|
||||
perf_stats.fast_reqs);
|
||||
_OUTP_IT("\n");
|
||||
_OUTP_IT("Total time of all inbound actions (us) incl. UL : %u\n",
|
||||
_OUTP_IT("Total time of all inbound actions (us) incl. UL : %lu\n",
|
||||
perf_stats.inbound_time);
|
||||
_OUTP_IT("Number of inbound transfers : %u\n",
|
||||
_OUTP_IT("Number of inbound transfers : %lu\n",
|
||||
perf_stats.inbound_cnt);
|
||||
_OUTP_IT("Total time of all outbound do_QDIOs (us) : %u\n",
|
||||
_OUTP_IT("Total time of all outbound do_QDIOs (us) : %lu\n",
|
||||
perf_stats.outbound_time);
|
||||
_OUTP_IT("Number of do_QDIOs outbound : %u\n",
|
||||
_OUTP_IT("Number of do_QDIOs outbound : %lu\n",
|
||||
perf_stats.outbound_cnt);
|
||||
_OUTP_IT("\n");
|
||||
|
||||
|
@ -3620,12 +3610,10 @@ qdio_perf_procfile_read(char *buffer, char **buffer_location, off_t offset,
|
|||
}
|
||||
|
||||
static struct proc_dir_entry *qdio_perf_proc_file;
|
||||
#endif /* QDIO_PERFORMANCE_STATS */
|
||||
|
||||
static void
|
||||
qdio_add_procfs_entry(void)
|
||||
{
|
||||
#ifdef QDIO_PERFORMANCE_STATS
|
||||
proc_perf_file_registration=0;
|
||||
qdio_perf_proc_file=create_proc_entry(QDIO_PERF,
|
||||
S_IFREG|0444,&proc_root);
|
||||
|
@ -3637,20 +3625,58 @@ qdio_add_procfs_entry(void)
|
|||
QDIO_PRINT_WARN("was not able to register perf. " \
|
||||
"proc-file (%i).\n",
|
||||
proc_perf_file_registration);
|
||||
#endif /* QDIO_PERFORMANCE_STATS */
|
||||
}
|
||||
|
||||
static void
|
||||
qdio_remove_procfs_entry(void)
|
||||
{
|
||||
#ifdef QDIO_PERFORMANCE_STATS
|
||||
perf_stats.tl_runs=0;
|
||||
|
||||
if (!proc_perf_file_registration) /* means if it went ok earlier */
|
||||
remove_proc_entry(QDIO_PERF,&proc_root);
|
||||
#endif /* QDIO_PERFORMANCE_STATS */
|
||||
}
|
||||
|
||||
/**
|
||||
* attributes in sysfs
|
||||
*****************************************************************************/
|
||||
|
||||
static ssize_t
|
||||
qdio_performance_stats_show(struct bus_type *bus, char *buf)
|
||||
{
|
||||
return sprintf(buf, "%i\n", qdio_performance_stats ? 1 : 0);
|
||||
}
|
||||
|
||||
static ssize_t
|
||||
qdio_performance_stats_store(struct bus_type *bus, const char *buf, size_t count)
|
||||
{
|
||||
char *tmp;
|
||||
int i;
|
||||
|
||||
i = simple_strtoul(buf, &tmp, 16);
|
||||
if ((i == 0) || (i == 1)) {
|
||||
if (i == qdio_performance_stats)
|
||||
return count;
|
||||
qdio_performance_stats = i;
|
||||
if (i==0) {
|
||||
/* reset perf. stat. info */
|
||||
i_p_nc = 0;
|
||||
i_p_c = 0;
|
||||
ii_p_nc = 0;
|
||||
ii_p_c = 0;
|
||||
o_p_nc = 0;
|
||||
o_p_c = 0;
|
||||
memset(&perf_stats, 0, sizeof(struct qdio_perf_stats));
|
||||
}
|
||||
} else {
|
||||
QDIO_PRINT_WARN("QDIO performance_stats: write 0 or 1 to this file!\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
return count;
|
||||
}
|
||||
|
||||
static BUS_ATTR(qdio_performance_stats, 0644, qdio_performance_stats_show,
|
||||
qdio_performance_stats_store);
|
||||
|
||||
static void
|
||||
tiqdio_register_thinints(void)
|
||||
{
|
||||
|
@ -3695,6 +3721,7 @@ qdio_release_qdio_memory(void)
|
|||
kfree(indicators);
|
||||
}
|
||||
|
||||
|
||||
static void
|
||||
qdio_unregister_dbf_views(void)
|
||||
{
|
||||
|
@ -3796,9 +3823,7 @@ static int __init
|
|||
init_QDIO(void)
|
||||
{
|
||||
int res;
|
||||
#ifdef QDIO_PERFORMANCE_STATS
|
||||
void *ptr;
|
||||
#endif /* QDIO_PERFORMANCE_STATS */
|
||||
|
||||
printk("qdio: loading %s\n",version);
|
||||
|
||||
|
@ -3811,13 +3836,12 @@ init_QDIO(void)
|
|||
return res;
|
||||
|
||||
QDIO_DBF_TEXT0(0,setup,"initQDIO");
|
||||
res = bus_create_file(&ccw_bus_type, &bus_attr_qdio_performance_stats);
|
||||
|
||||
#ifdef QDIO_PERFORMANCE_STATS
|
||||
memset((void*)&perf_stats,0,sizeof(perf_stats));
|
||||
memset((void*)&perf_stats,0,sizeof(perf_stats));
|
||||
QDIO_DBF_TEXT0(0,setup,"perfstat");
|
||||
ptr=&perf_stats;
|
||||
QDIO_DBF_HEX0(0,setup,&ptr,sizeof(void*));
|
||||
#endif /* QDIO_PERFORMANCE_STATS */
|
||||
|
||||
qdio_add_procfs_entry();
|
||||
|
||||
|
@ -3841,7 +3865,7 @@ cleanup_QDIO(void)
|
|||
qdio_release_qdio_memory();
|
||||
qdio_unregister_dbf_views();
|
||||
mempool_destroy(qdio_mempool_scssc);
|
||||
|
||||
bus_remove_file(&ccw_bus_type, &bus_attr_qdio_performance_stats);
|
||||
printk("qdio: %s: module removed\n",version);
|
||||
}
|
||||
|
||||
|
|
|
@ -12,10 +12,6 @@
|
|||
#endif /* CONFIG_QDIO_DEBUG */
|
||||
#define QDIO_USE_PROCESSING_STATE
|
||||
|
||||
#ifdef CONFIG_QDIO_PERF_STATS
|
||||
#define QDIO_PERFORMANCE_STATS
|
||||
#endif /* CONFIG_QDIO_PERF_STATS */
|
||||
|
||||
#define QDIO_MINIMAL_BH_RELIEF_TIME 16
|
||||
#define QDIO_TIMER_POLL_VALUE 1
|
||||
#define IQDIO_TIMER_POLL_VALUE 1
|
||||
|
@@ -409,25 +405,23 @@ do_clear_global_summary(void)
#define CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS 0x08
#define CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS 0x04

#ifdef QDIO_PERFORMANCE_STATS
struct qdio_perf_stats {
        unsigned int tl_runs;
        unsigned long tl_runs;

        unsigned int siga_outs;
        unsigned int siga_ins;
        unsigned int siga_syncs;
        unsigned int pcis;
        unsigned int thinints;
        unsigned int fast_reqs;
        unsigned long siga_outs;
        unsigned long siga_ins;
        unsigned long siga_syncs;
        unsigned long pcis;
        unsigned long thinints;
        unsigned long fast_reqs;

        __u64 start_time_outbound;
        unsigned int outbound_cnt;
        unsigned int outbound_time;
        unsigned long outbound_cnt;
        unsigned long outbound_time;
        __u64 start_time_inbound;
        unsigned int inbound_cnt;
        unsigned int inbound_time;
        unsigned long inbound_cnt;
        unsigned long inbound_time;
};
#endif /* QDIO_PERFORMANCE_STATS */
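
In the hunk above, the unsigned int lines are the removed side of the diff and
the unsigned long lines the added side: every counter is widened to 64 bits on
64-bit s390. A quick sanity check on why that matters, as a stand-alone sketch
(the event rate is an assumption for illustration, not a measured figure):

        #include <stdio.h>

        int main(void)
        {
                unsigned long long max32 = 0xffffffffULL;   /* 2^32 - 1 events */
                double secs = (double)max32 / 1e6;          /* at 10^6 events/s */

                /* ~4295 s, i.e. roughly 1.2 hours until a 32-bit counter wraps */
                printf("32-bit counter wraps after ~%.0f s (~%.1f h)\n",
                       secs, secs / 3600.0);
                return 0;
        }
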

/* unlikely as the later the better */
#define SYNC_MEMORY if (unlikely(q->siga_sync)) qdio_siga_sync_q(q)

@@ -33,6 +33,7 @@
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <asm/s390_rdev.h>
#include <asm/reset.h>

#include "ap_bus.h"

@@ -1128,6 +1129,19 @@ static void ap_poll_thread_stop(void)
        mutex_unlock(&ap_poll_thread_mutex);
}

static void ap_reset(void)
{
        int i, j;

        for (i = 0; i < AP_DOMAINS; i++)
                for (j = 0; j < AP_DEVICES; j++)
                        ap_reset_queue(AP_MKQID(j, i));
}

static struct reset_call ap_reset_call = {
        .fn = ap_reset,
};
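
AP_DOMAINS and AP_DEVICES bound the two loop indices, so ap_reset() issues a
reset to every possible adapter/domain combination. For illustration only (the
real macro lives in ap_bus.h and may differ), an AP queue id is conventionally
composed from the adapter index in the high byte and the usage-domain index in
the low byte:

        /* illustrative stand-in for AP_MKQID(device, domain), not the real define */
        #define AP_MKQID_SKETCH(dev, dom)  ((((dev) & 63) << 8) | ((dom) & 255))
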

/**
 * The module initialization code.
 */
@@ -1144,6 +1158,7 @@ int __init ap_module_init(void)
                printk(KERN_WARNING "AP instructions not installed.\n");
                return -ENODEV;
        }
        register_reset_call(&ap_reset_call);

        /* Create /sys/bus/ap. */
        rc = bus_register(&ap_bus_type);
@@ -1197,6 +1212,7 @@ out_bus:
                bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
        bus_unregister(&ap_bus_type);
out:
        unregister_reset_call(&ap_reset_call);
        return rc;
}
@@ -1227,6 +1243,7 @@ void ap_module_exit(void)
        for (i = 0; ap_bus_attrs[i]; i++)
                bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
        bus_unregister(&ap_bus_type);
        unregister_reset_call(&ap_reset_call);
}

#ifndef CONFIG_ZCRYPT_MONOLITHIC

@@ -69,11 +69,13 @@ typedef struct dasd_information2_t {
 * 0x01: readonly (ro)
 * 0x02: use diag discipline (diag)
 * 0x04: set the device initially online (internal use only)
 * 0x08: enable ERP related logging
 */
#define DASD_FEATURE_DEFAULT    0x00
#define DASD_FEATURE_READONLY   0x01
#define DASD_FEATURE_USEDIAG    0x02
#define DASD_FEATURE_INITIAL_ONLINE     0x04
#define DASD_FEATURE_ERPLOG     0x08

#define DASD_PARTN_BITS 2
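
The feature flags form a bit mask, so they combine with bitwise or. A
self-contained demonstration of the documented semantics (the two defines are
copied from the hunk above; the printout is invented for illustration):

        #include <stdio.h>

        #define DASD_FEATURE_READONLY   0x01
        #define DASD_FEATURE_ERPLOG     0x08

        int main(void)
        {
                unsigned int features = DASD_FEATURE_READONLY | DASD_FEATURE_ERPLOG;

                if (features & DASD_FEATURE_READONLY)
                        printf("device is read-only\n");
                if (features & DASD_FEATURE_ERPLOG)
                        printf("ERP related logging enabled\n");
                return 0;
        }
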
@@ -127,6 +127,26 @@ page_get_storage_key(unsigned long addr)
        return skey;
}

extern unsigned long max_pfn;

static inline int pfn_valid(unsigned long pfn)
{
        unsigned long dummy;
        int ccode;

        if (pfn >= max_pfn)
                return 0;

        asm volatile(
                " lra %0,0(%2)\n"
                " ipm %1\n"
                " srl %1,28\n"
                : "=d" (dummy), "=d" (ccode)
                : "a" (pfn << PAGE_SHIFT)
                : "cc");
        return !ccode;
}

#endif /* !__ASSEMBLY__ */

/* to align the pointer to the (next) page boundary */
@@ -138,8 +158,6 @@ page_get_storage_key(unsigned long addr)
#define __va(x)                 (void *)(unsigned long)(x)
#define virt_to_page(kaddr)     pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define page_to_phys(page)      (page_to_pfn(page) << PAGE_SHIFT)

#define pfn_valid(pfn)          ((pfn) < max_mapnr)
#define virt_addr_valid(kaddr)  pfn_valid(__pa(kaddr) >> PAGE_SHIFT)

#define VM_DATA_DEFAULT_FLAGS   (VM_READ | VM_WRITE | VM_EXEC | \

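The two page.h hunks above replace the old table-free pfn_valid() macro, which
only compared against max_mapnr, with an active probe: LOAD REAL ADDRESS (lra)
attempts to translate the address of the frame, ipm/srl extract the resulting
condition code, and a non-zero code means no mapping backs that frame. With a
virtual memmap there can be holes below max_pfn that the old comparison could
not see. A hypothetical caller (not from the patch), sketching a pfn walk that
skips such holes:

        /* hypothetical helper for illustration */
        static unsigned long count_backed_pfns(unsigned long start_pfn,
                                               unsigned long end_pfn)
        {
                unsigned long pfn, backed = 0;

                for (pfn = start_pfn; pfn < end_pfn; pfn++)
                        if (pfn_valid(pfn))     /* lra probe succeeded */
                                backed++;
                return backed;
        }
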
@@ -25,8 +25,11 @@ extern void diag10(unsigned long addr);
 * Page allocation orders.
 */
#ifndef __s390x__
# define PTE_ALLOC_ORDER        0
# define PMD_ALLOC_ORDER        0
# define PGD_ALLOC_ORDER        1
#else /* __s390x__ */
# define PTE_ALLOC_ORDER        0
# define PMD_ALLOC_ORDER        2
# define PGD_ALLOC_ORDER        2
#endif /* __s390x__ */
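
For reference, order n means 2^n contiguous 4 KiB pages. The values above are
consistent with the classic s390 table geometries, stated here as background
rather than taken from the patch:

        /*
         * 31 bit: segment table, 2048 entries * 4 bytes =  8 KiB -> order 1
         * 64 bit: region/segment tables, 2048 entries * 8 bytes = 16 KiB -> order 2
         * pte tables fit into a single 4 KiB page -> order 0
         */
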
@@ -107,23 +107,25 @@ extern char empty_zero_page[PAGE_SIZE];
 * The vmalloc() routines leaves a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
extern unsigned long vmalloc_end;
#define VMALLOC_OFFSET  (8*1024*1024)
#define VMALLOC_START   (((unsigned long) high_memory + VMALLOC_OFFSET) \
                         & ~(VMALLOC_OFFSET-1))
#define VMALLOC_END     vmalloc_end

/*
 * We need some free virtual space to be able to do vmalloc.
 * VMALLOC_MIN_SIZE defines the minimum size of the vmalloc
 * area. On a machine with 2GB memory we make sure that we
 * have at least 128MB free space for vmalloc. On a machine
 * with 4TB we make sure we have at least 1GB.
 * with 4TB we make sure we have at least 128GB.
 */
#ifndef __s390x__
#define VMALLOC_MIN_SIZE        0x8000000UL
#define VMALLOC_END     0x80000000UL
#define VMALLOC_END_INIT        0x80000000UL
#else /* __s390x__ */
#define VMALLOC_MIN_SIZE        0x40000000UL
#define VMALLOC_END     0x40000000000UL
#define VMALLOC_MIN_SIZE        0x2000000000UL
#define VMALLOC_END_INIT        0x40000000000UL
#endif /* __s390x__ */
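
Spelling out the hex constants makes the updated comment easy to verify (plain
arithmetic, with 1 GiB = 2^30 bytes):

        /*
         * 0x8000000UL     = 2^27 = 128 MiB  (31-bit VMALLOC_MIN_SIZE)
         * 0x80000000UL    = 2^31 =   2 GiB  (31-bit VMALLOC_END_INIT)
         * 0x40000000UL    = 2^30 =   1 GiB  (old 64-bit minimum, "at least 1GB")
         * 0x2000000000UL  = 2^37 = 128 GiB  (new 64-bit minimum, "at least 128GB")
         * 0x40000000000UL = 2^42 =   4 TiB  (64-bit VMALLOC_END_INIT)
         */
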

/*
@@ -815,11 +817,17 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)

#define kern_addr_valid(addr)   (1)

extern int add_shared_memory(unsigned long start, unsigned long size);
extern int remove_shared_memory(unsigned long start, unsigned long size);

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()    do { } while (0)

#define __HAVE_ARCH_MEMMAP_INIT
extern void memmap_init(unsigned long, int, unsigned long, unsigned long);

#define __HAVE_ARCH_PTEP_ESTABLISH
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
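
Defining __HAVE_ARCH_MEMMAP_INIT lets the architecture substitute its own
memmap_init() for the generic one, which is what allows the memmap itself to
live in virtual memory. A sketch of the shape such an override can take,
assuming it first ensures the struct page range is mapped and then falls
through to the generic memmap_init_zone() (illustrative only, not the actual
s390 implementation):

        /* signature matches the extern declaration above */
        void memmap_init(unsigned long size, int nid, unsigned long zone,
                         unsigned long start_pfn)
        {
                /* make sure the memmap backing this pfn range exists ... */

                /* ... then initialize the struct pages the generic way */
                memmap_init_zone(size, nid, zone, start_pfn);
        }
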