Merge branch 'core/percpu' into x86/paravirt

commit 327641da8e
@@ -954,14 +954,14 @@ elevator_allow_merge_fn called whenever the block layer determines
results in some sort of conflict internally,
this hook allows it to do that.

elevator_dispatch_fn fills the dispatch queue with ready requests.
elevator_dispatch_fn* fills the dispatch queue with ready requests.
I/O schedulers are free to postpone requests by
not filling the dispatch queue unless @force
is non-zero. Once dispatched, I/O schedulers
are not allowed to manipulate the requests -
they belong to generic dispatch queue.

elevator_add_req_fn called to add a new request into the scheduler
elevator_add_req_fn* called to add a new request into the scheduler

elevator_queue_empty_fn returns true if the merge queue is empty.
Drivers shouldn't use this, but rather check
@@ -991,7 +991,7 @@ elevator_activate_req_fn Called when device driver first sees a request.
elevator_deactivate_req_fn Called when device driver decides to delay
a request by requeueing it.

elevator_init_fn
elevator_init_fn*
elevator_exit_fn Allocate and free any elevator specific storage
for a queue.

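As an aside, the hooks described above are consumed through an I/O scheduler's elevator_type ops table. The sketch below is illustrative only, loosely modeled on the noop scheduler of this era; the prototypes and field names are assumptions recalled from that code base, not something introduced by this commit.

/* Illustrative sketch only -- prototypes assumed from the noop scheduler. */
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/module.h>

static int sketch_dispatch(struct request_queue *q, int force)
{
	/* Move any ready request onto the generic dispatch queue and
	 * return non-zero; nothing is queued in this sketch. */
	return 0;
}

static void sketch_add_request(struct request_queue *q, struct request *rq)
{
	/* Queue rq internally; it is not on the dispatch queue yet. */
}

static int sketch_queue_empty(struct request_queue *q)
{
	return 1;	/* no requests queued in this sketch */
}

static struct elevator_type elevator_sketch = {
	.ops = {
		.elevator_dispatch_fn		= sketch_dispatch,
		.elevator_add_req_fn		= sketch_add_request,
		.elevator_queue_empty_fn	= sketch_queue_empty,
	},
	.elevator_name	= "sketch",
	.elevator_owner	= THIS_MODULE,
};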
@@ -2,14 +2,14 @@
IP-Aliasing:
============

IP-aliases are additional IP-addresses/masks hooked up to a base
interface by adding a colon and a string when running ifconfig.
IP-aliases are an obsolete way to manage multiple IP-addresses/masks
per interface. Newer tools such as iproute2 support multiple
address/prefixes per interface, but aliases are still supported
for backwards compatibility.

An alias is formed by adding a colon and a string when running ifconfig.
This string is usually numeric, but this is not a must.

IP-Aliases are avail if CONFIG_INET (`standard' IPv4 networking)
is configured in the kernel.


o Alias creation.
Alias creation is done by 'magic' interface naming: eg. to create a
200.1.1.1 alias for eth0 ...
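For illustration only (the hunk above is cut off before the example that follows in the file), the 'magic' naming takes the form of the base interface name, a colon, and an arbitrary label:

	ifconfig eth0:0 200.1.1.1

Here "eth0:0" is an alias of eth0 carrying the address 200.1.1.1; the label after the colon is usually numeric but need not be.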
@@ -38,16 +38,3 @@ o Relationship with main device

If the base device is shut down the added aliases will be deleted
too.


Contact
-------
Please finger or e-mail me:
Juan Jose Ciarlante <jjciarla@raiz.uncu.edu.ar>

Updated by Erik Schoenfelder <schoenfr@gaertner.DE>

; local variables:
; mode: indented-text
; mode: auto-fill
; end:
@@ -2836,8 +2836,6 @@ S: Maintained
MAC80211
P: Johannes Berg
M: johannes@sipsolutions.net
P: Michael Wu
M: flamingice@sourmilk.net
L: linux-wireless@vger.kernel.org
W: http://linuxwireless.org/
T: git kernel.org:/pub/scm/linux/kernel/git/linville/wireless-2.6.git
@@ -768,7 +768,8 @@ extern int sysenter_setup(void);
extern struct desc_ptr early_gdt_descr;

extern void cpu_set_gdt(int);
extern void switch_to_new_gdt(void);
extern void switch_to_new_gdt(int);
extern void load_percpu_segment(int);
extern void cpu_init(void);

static inline unsigned long get_debugctlmsr(void)
@@ -253,17 +253,8 @@ static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)

__u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;

/* Current gdt points %fs at the "master" per-cpu area: after this,
 * it's on the real one. */
void switch_to_new_gdt(void)
void load_percpu_segment(int cpu)
{
	struct desc_ptr gdt_descr;
	int cpu = smp_processor_id();

	gdt_descr.address = (long)get_cpu_gdt_table(cpu);
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);
	/* Reload the per-cpu base */
#ifdef CONFIG_X86_32
	loadsegment(fs, __KERNEL_PERCPU);
#else

@@ -272,6 +263,20 @@ void switch_to_new_gdt(void)
#endif
}

/* Current gdt points %fs at the "master" per-cpu area: after this,
 * it's on the real one. */
void switch_to_new_gdt(int cpu)
{
	struct desc_ptr gdt_descr;

	gdt_descr.address = (long)get_cpu_gdt_table(cpu);
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);
	/* Reload the per-cpu base */

	load_percpu_segment(cpu);
}

static struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};

static void __cpuinit default_init(struct cpuinfo_x86 *c)
@@ -993,7 +998,7 @@ void __cpuinit cpu_init(void)
	 * and set up the GDT descriptor:
	 */

	switch_to_new_gdt();
	switch_to_new_gdt(cpu);
	loadsegment(fs, 0);

	load_idt((const struct desc_ptr *)&idt_descr);
@@ -1098,7 +1103,7 @@ void __cpuinit cpu_init(void)
	clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

	load_idt(&idt_descr);
	switch_to_new_gdt();
	switch_to_new_gdt(cpu);

	/*
	 * Set up and load the per-CPU TSS and LDT
@@ -122,7 +122,7 @@ void __init setup_per_cpu_areas(void)
	 * area. Reload any changed state for the boot CPU.
	 */
	if (cpu == boot_cpu_id)
		switch_to_new_gdt();
		switch_to_new_gdt(cpu);

	DBG("PERCPU: cpu %4d %p\n", cpu, ptr);
}
@@ -1185,7 +1185,7 @@ out:
void __init native_smp_prepare_boot_cpu(void)
{
	int me = smp_processor_id();
	switch_to_new_gdt();
	switch_to_new_gdt(me);
	/* already set me in cpu_online_mask in boot_cpu_init() */
	cpumask_set_cpu(me, cpu_callout_mask);
	per_cpu(cpu_state, me) = CPU_ONLINE;
@@ -259,7 +259,7 @@ const struct cpumask *uv_flush_send_and_wait(int cpu, int this_blade,
	 * the cpu's, all of which are still in the mask.
	 */
	__get_cpu_var(ptcstats).ptc_i++;
	return 0;
	return flush_mask;
}

/*
@@ -22,6 +22,7 @@ PHDRS {
#ifdef CONFIG_SMP
	percpu PT_LOAD FLAGS(7);	/* RWE */
#endif
	data.init2 PT_LOAD FLAGS(7);	/* RWE */
	note PT_NOTE FLAGS(0);	/* ___ */
}
SECTIONS
@@ -215,7 +216,7 @@ SECTIONS
	/*
	 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
	 * output PHDR, so the next output section - __data_nosave - should
	 * switch it back to data.init. Also, pda should be at the head of
	 * start another section data.init2. Also, pda should be at the head of
	 * percpu area. Preallocate it and define the percpu offset symbol
	 * so that it can be accessed as a percpu variable.
	 */
@@ -232,7 +233,7 @@ SECTIONS
	__nosave_begin = .;
	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
		*(.data.nosave)
	} :data.init	/* switch back to data.init, see PERCPU_VADDR() above */
	} :data.init2	/* use another section data.init2, see PERCPU_VADDR() above */
	. = ALIGN(PAGE_SIZE);
	__nosave_end = .;
@ -1746,12 +1746,13 @@ static void __init voyager_smp_prepare_cpus(unsigned int max_cpus)
|
|||
|
||||
static void __cpuinit voyager_smp_prepare_boot_cpu(void)
|
||||
{
|
||||
switch_to_new_gdt();
|
||||
int cpu = smp_processor_id();
|
||||
switch_to_new_gdt(cpu);
|
||||
|
||||
cpu_set(smp_processor_id(), cpu_online_map);
|
||||
cpu_set(smp_processor_id(), cpu_callout_map);
|
||||
cpu_set(smp_processor_id(), cpu_possible_map);
|
||||
cpu_set(smp_processor_id(), cpu_present_map);
|
||||
cpu_set(cpu, cpu_online_map);
|
||||
cpu_set(cpu, cpu_callout_map);
|
||||
cpu_set(cpu, cpu_possible_map);
|
||||
cpu_set(cpu, cpu_present_map);
|
||||
}
|
||||
|
||||
static int __cpuinit voyager_cpu_up(unsigned int cpu)
|
||||
|
|
|
@ -917,6 +917,9 @@ asmlinkage void __init xen_start_kernel(void)
|
|||
have_vcpu_info_placement = 0;
|
||||
#endif
|
||||
|
||||
/* setup percpu state */
|
||||
load_percpu_segment(0);
|
||||
|
||||
xen_smp_init();
|
||||
|
||||
/* Get mfn list */
|
||||
|
|
|
@ -170,7 +170,8 @@ static void __init xen_smp_prepare_boot_cpu(void)
|
|||
|
||||
/* We've switched to the "real" per-cpu gdt, so make sure the
|
||||
old memory can be recycled */
|
||||
make_lowmem_page_readwrite(&per_cpu_var(gdt_page));
|
||||
make_lowmem_page_readwrite(__per_cpu_load +
|
||||
(unsigned long)&per_cpu_var(gdt_page));
|
||||
|
||||
xen_setup_vcpu_info_placement();
|
||||
}
|
||||
|
@ -235,6 +236,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
|
|||
ctxt->user_regs.ss = __KERNEL_DS;
|
||||
#ifdef CONFIG_X86_32
|
||||
ctxt->user_regs.fs = __KERNEL_PERCPU;
|
||||
#else
|
||||
ctxt->gs_base_kernel = per_cpu_offset(cpu);
|
||||
#endif
|
||||
ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
|
||||
ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
|
||||
|
|
|
@ -302,7 +302,7 @@ static void bio_end_empty_barrier(struct bio *bio, int err)
|
|||
* Description:
|
||||
* Issue a flush for the block device in question. Caller can supply
|
||||
* room for storing the error offset in case of a flush error, if they
|
||||
* wish to. Caller must run wait_for_completion() on its own.
|
||||
* wish to.
|
||||
*/
|
||||
int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
|
||||
{
|
||||
|
|
block/blk-core.c
|
@ -64,11 +64,12 @@ static struct workqueue_struct *kblockd_workqueue;
|
|||
|
||||
static void drive_stat_acct(struct request *rq, int new_io)
|
||||
{
|
||||
struct gendisk *disk = rq->rq_disk;
|
||||
struct hd_struct *part;
|
||||
int rw = rq_data_dir(rq);
|
||||
int cpu;
|
||||
|
||||
if (!blk_fs_request(rq) || !rq->rq_disk)
|
||||
if (!blk_fs_request(rq) || !disk || !blk_queue_io_stat(disk->queue))
|
||||
return;
|
||||
|
||||
cpu = part_stat_lock();
|
||||
|
@ -599,8 +600,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
|
|||
q->request_fn = rfn;
|
||||
q->prep_rq_fn = NULL;
|
||||
q->unplug_fn = generic_unplug_device;
|
||||
q->queue_flags = (1 << QUEUE_FLAG_CLUSTER |
|
||||
1 << QUEUE_FLAG_STACKABLE);
|
||||
q->queue_flags = QUEUE_FLAG_DEFAULT;
|
||||
q->queue_lock = lock;
|
||||
|
||||
blk_queue_segment_boundary(q, BLK_SEG_BOUNDARY_MASK);
|
||||
|
@ -1125,6 +1125,8 @@ void init_request_from_bio(struct request *req, struct bio *bio)
|
|||
|
||||
if (bio_sync(bio))
|
||||
req->cmd_flags |= REQ_RW_SYNC;
|
||||
if (bio_unplug(bio))
|
||||
req->cmd_flags |= REQ_UNPLUG;
|
||||
if (bio_rw_meta(bio))
|
||||
req->cmd_flags |= REQ_RW_META;
|
||||
|
||||
|
@ -1141,6 +1143,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
|
|||
int el_ret, nr_sectors;
|
||||
const unsigned short prio = bio_prio(bio);
|
||||
const int sync = bio_sync(bio);
|
||||
const int unplug = bio_unplug(bio);
|
||||
int rw_flags;
|
||||
|
||||
nr_sectors = bio_sectors(bio);
|
||||
|
@ -1244,7 +1247,7 @@ get_rq:
|
|||
blk_plug_device(q);
|
||||
add_request(q, req);
|
||||
out:
|
||||
if (sync || blk_queue_nonrot(q))
|
||||
if (unplug || blk_queue_nonrot(q))
|
||||
__generic_unplug_device(q);
|
||||
spin_unlock_irq(q->queue_lock);
|
||||
return 0;
|
||||
|
@ -1448,6 +1451,11 @@ static inline void __generic_make_request(struct bio *bio)
|
|||
err = -EOPNOTSUPP;
|
||||
goto end_io;
|
||||
}
|
||||
if (bio_barrier(bio) && bio_has_data(bio) &&
|
||||
(q->next_ordered == QUEUE_ORDERED_NONE)) {
|
||||
err = -EOPNOTSUPP;
|
||||
goto end_io;
|
||||
}
|
||||
|
||||
ret = q->make_request_fn(q, bio);
|
||||
} while (ret);
|
||||
|
@ -1655,6 +1663,55 @@ void blkdev_dequeue_request(struct request *req)
|
|||
}
|
||||
EXPORT_SYMBOL(blkdev_dequeue_request);
|
||||
|
||||
static void blk_account_io_completion(struct request *req, unsigned int bytes)
|
||||
{
|
||||
struct gendisk *disk = req->rq_disk;
|
||||
|
||||
if (!disk || !blk_queue_io_stat(disk->queue))
|
||||
return;
|
||||
|
||||
if (blk_fs_request(req)) {
|
||||
const int rw = rq_data_dir(req);
|
||||
struct hd_struct *part;
|
||||
int cpu;
|
||||
|
||||
cpu = part_stat_lock();
|
||||
part = disk_map_sector_rcu(req->rq_disk, req->sector);
|
||||
part_stat_add(cpu, part, sectors[rw], bytes >> 9);
|
||||
part_stat_unlock();
|
||||
}
|
||||
}
|
||||
|
||||
static void blk_account_io_done(struct request *req)
|
||||
{
|
||||
struct gendisk *disk = req->rq_disk;
|
||||
|
||||
if (!disk || !blk_queue_io_stat(disk->queue))
|
||||
return;
|
||||
|
||||
/*
|
||||
* Account IO completion. bar_rq isn't accounted as a normal
|
||||
* IO on queueing nor completion. Accounting the containing
|
||||
* request is enough.
|
||||
*/
|
||||
if (blk_fs_request(req) && req != &req->q->bar_rq) {
|
||||
unsigned long duration = jiffies - req->start_time;
|
||||
const int rw = rq_data_dir(req);
|
||||
struct hd_struct *part;
|
||||
int cpu;
|
||||
|
||||
cpu = part_stat_lock();
|
||||
part = disk_map_sector_rcu(disk, req->sector);
|
||||
|
||||
part_stat_inc(cpu, part, ios[rw]);
|
||||
part_stat_add(cpu, part, ticks[rw], duration);
|
||||
part_round_stats(cpu, part);
|
||||
part_dec_in_flight(part);
|
||||
|
||||
part_stat_unlock();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* __end_that_request_first - end I/O on a request
|
||||
* @req: the request being processed
|
||||
|
@ -1690,16 +1747,7 @@ static int __end_that_request_first(struct request *req, int error,
|
|||
(unsigned long long)req->sector);
|
||||
}
|
||||
|
||||
if (blk_fs_request(req) && req->rq_disk) {
|
||||
const int rw = rq_data_dir(req);
|
||||
struct hd_struct *part;
|
||||
int cpu;
|
||||
|
||||
cpu = part_stat_lock();
|
||||
part = disk_map_sector_rcu(req->rq_disk, req->sector);
|
||||
part_stat_add(cpu, part, sectors[rw], nr_bytes >> 9);
|
||||
part_stat_unlock();
|
||||
}
|
||||
blk_account_io_completion(req, nr_bytes);
|
||||
|
||||
total_bytes = bio_nbytes = 0;
|
||||
while ((bio = req->bio) != NULL) {
|
||||
|
@ -1779,8 +1827,6 @@ static int __end_that_request_first(struct request *req, int error,
|
|||
*/
|
||||
static void end_that_request_last(struct request *req, int error)
|
||||
{
|
||||
struct gendisk *disk = req->rq_disk;
|
||||
|
||||
if (blk_rq_tagged(req))
|
||||
blk_queue_end_tag(req->q, req);
|
||||
|
||||
|
@ -1792,27 +1838,7 @@ static void end_that_request_last(struct request *req, int error)
|
|||
|
||||
blk_delete_timer(req);
|
||||
|
||||
/*
|
||||
* Account IO completion. bar_rq isn't accounted as a normal
|
||||
* IO on queueing nor completion. Accounting the containing
|
||||
* request is enough.
|
||||
*/
|
||||
if (disk && blk_fs_request(req) && req != &req->q->bar_rq) {
|
||||
unsigned long duration = jiffies - req->start_time;
|
||||
const int rw = rq_data_dir(req);
|
||||
struct hd_struct *part;
|
||||
int cpu;
|
||||
|
||||
cpu = part_stat_lock();
|
||||
part = disk_map_sector_rcu(disk, req->sector);
|
||||
|
||||
part_stat_inc(cpu, part, ios[rw]);
|
||||
part_stat_add(cpu, part, ticks[rw], duration);
|
||||
part_round_stats(cpu, part);
|
||||
part_dec_in_flight(part);
|
||||
|
||||
part_stat_unlock();
|
||||
}
|
||||
blk_account_io_done(req);
|
||||
|
||||
if (req->end_io)
|
||||
req->end_io(req, error);
|
||||
|
|
|
@ -309,24 +309,24 @@ static struct kobj_type integrity_ktype = {
|
|||
/**
|
||||
* blk_integrity_register - Register a gendisk as being integrity-capable
|
||||
* @disk: struct gendisk pointer to make integrity-aware
|
||||
* @template: integrity profile
|
||||
* @template: optional integrity profile to register
|
||||
*
|
||||
* Description: When a device needs to advertise itself as being able
|
||||
* to send/receive integrity metadata it must use this function to
|
||||
* register the capability with the block layer. The template is a
|
||||
* blk_integrity struct with values appropriate for the underlying
|
||||
* hardware. See Documentation/block/data-integrity.txt.
|
||||
* hardware. If template is NULL the new profile is allocated but
|
||||
* not filled out. See Documentation/block/data-integrity.txt.
|
||||
*/
|
||||
int blk_integrity_register(struct gendisk *disk, struct blk_integrity *template)
|
||||
{
|
||||
struct blk_integrity *bi;
|
||||
|
||||
BUG_ON(disk == NULL);
|
||||
BUG_ON(template == NULL);
|
||||
|
||||
if (disk->integrity == NULL) {
|
||||
bi = kmem_cache_alloc(integrity_cachep,
|
||||
GFP_KERNEL | __GFP_ZERO);
|
||||
GFP_KERNEL | __GFP_ZERO);
|
||||
if (!bi)
|
||||
return -1;
|
||||
|
||||
|
@ -346,13 +346,16 @@ int blk_integrity_register(struct gendisk *disk, struct blk_integrity *template)
|
|||
bi = disk->integrity;
|
||||
|
||||
/* Use the provided profile as template */
|
||||
bi->name = template->name;
|
||||
bi->generate_fn = template->generate_fn;
|
||||
bi->verify_fn = template->verify_fn;
|
||||
bi->tuple_size = template->tuple_size;
|
||||
bi->set_tag_fn = template->set_tag_fn;
|
||||
bi->get_tag_fn = template->get_tag_fn;
|
||||
bi->tag_size = template->tag_size;
|
||||
if (template != NULL) {
|
||||
bi->name = template->name;
|
||||
bi->generate_fn = template->generate_fn;
|
||||
bi->verify_fn = template->verify_fn;
|
||||
bi->tuple_size = template->tuple_size;
|
||||
bi->set_tag_fn = template->set_tag_fn;
|
||||
bi->get_tag_fn = template->get_tag_fn;
|
||||
bi->tag_size = template->tag_size;
|
||||
} else
|
||||
bi->name = "unsupported";
|
||||
|
||||
return 0;
|
||||
}
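A minimal sketch of how a driver might use this interface, assuming only the signature and behaviour shown above (passing a NULL template is what this patch newly allows); it is illustrative, not part of the patch:

#include <linux/blkdev.h>
#include <linux/genhd.h>

/* Sketch: a driver advertising integrity capability.  With a NULL
 * template the profile is allocated but left unfilled ("unsupported")
 * until the fields are set later. */
static int example_register_integrity(struct gendisk *disk)
{
	return blk_integrity_register(disk, NULL);
}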
|
||||
|
|
|
@ -130,6 +130,27 @@ static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
|
|||
return queue_var_show(max_hw_sectors_kb, (page));
|
||||
}
|
||||
|
||||
static ssize_t queue_nonrot_show(struct request_queue *q, char *page)
|
||||
{
|
||||
return queue_var_show(!blk_queue_nonrot(q), page);
|
||||
}
|
||||
|
||||
static ssize_t queue_nonrot_store(struct request_queue *q, const char *page,
|
||||
size_t count)
|
||||
{
|
||||
unsigned long nm;
|
||||
ssize_t ret = queue_var_store(&nm, page, count);
|
||||
|
||||
spin_lock_irq(q->queue_lock);
|
||||
if (nm)
|
||||
queue_flag_clear(QUEUE_FLAG_NONROT, q);
|
||||
else
|
||||
queue_flag_set(QUEUE_FLAG_NONROT, q);
|
||||
spin_unlock_irq(q->queue_lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
|
||||
{
|
||||
return queue_var_show(blk_queue_nomerges(q), page);
|
||||
|
@ -146,8 +167,8 @@ static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
|
|||
queue_flag_set(QUEUE_FLAG_NOMERGES, q);
|
||||
else
|
||||
queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
|
||||
|
||||
spin_unlock_irq(q->queue_lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -176,6 +197,27 @@ queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
|
|||
return ret;
|
||||
}
|
||||
|
||||
static ssize_t queue_iostats_show(struct request_queue *q, char *page)
|
||||
{
|
||||
return queue_var_show(blk_queue_io_stat(q), page);
|
||||
}
|
||||
|
||||
static ssize_t queue_iostats_store(struct request_queue *q, const char *page,
|
||||
size_t count)
|
||||
{
|
||||
unsigned long stats;
|
||||
ssize_t ret = queue_var_store(&stats, page, count);
|
||||
|
||||
spin_lock_irq(q->queue_lock);
|
||||
if (stats)
|
||||
queue_flag_set(QUEUE_FLAG_IO_STAT, q);
|
||||
else
|
||||
queue_flag_clear(QUEUE_FLAG_IO_STAT, q);
|
||||
spin_unlock_irq(q->queue_lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static struct queue_sysfs_entry queue_requests_entry = {
|
||||
.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
|
||||
.show = queue_requests_show,
|
||||
|
@ -210,6 +252,12 @@ static struct queue_sysfs_entry queue_hw_sector_size_entry = {
|
|||
.show = queue_hw_sector_size_show,
|
||||
};
|
||||
|
||||
static struct queue_sysfs_entry queue_nonrot_entry = {
|
||||
.attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
|
||||
.show = queue_nonrot_show,
|
||||
.store = queue_nonrot_store,
|
||||
};
|
||||
|
||||
static struct queue_sysfs_entry queue_nomerges_entry = {
|
||||
.attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
|
||||
.show = queue_nomerges_show,
|
||||
|
@ -222,6 +270,12 @@ static struct queue_sysfs_entry queue_rq_affinity_entry = {
|
|||
.store = queue_rq_affinity_store,
|
||||
};
|
||||
|
||||
static struct queue_sysfs_entry queue_iostats_entry = {
|
||||
.attr = {.name = "iostats", .mode = S_IRUGO | S_IWUSR },
|
||||
.show = queue_iostats_show,
|
||||
.store = queue_iostats_store,
|
||||
};
|
||||
|
||||
static struct attribute *default_attrs[] = {
|
||||
&queue_requests_entry.attr,
|
||||
&queue_ra_entry.attr,
|
||||
|
@ -229,8 +283,10 @@ static struct attribute *default_attrs[] = {
|
|||
&queue_max_sectors_entry.attr,
|
||||
&queue_iosched_entry.attr,
|
||||
&queue_hw_sector_size_entry.attr,
|
||||
&queue_nonrot_entry.attr,
|
||||
&queue_nomerges_entry.attr,
|
||||
&queue_rq_affinity_entry.attr,
|
||||
&queue_iostats_entry.attr,
|
||||
NULL,
|
||||
};
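The new attributes appear in sysfs alongside the existing queue files. Assuming a disk named sda (hypothetical), they could be exercised like this; note that "rotational" stores the inverse of the NONROT flag, as queue_nonrot_show/store above imply:

	cat /sys/block/sda/queue/rotational	# 1 = rotational, 0 = non-rotational (SSD)
	echo 0 > /sys/block/sda/queue/rotational
	echo 1 > /sys/block/sda/queue/iostats	# enable per-disk I/O accounting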
|
||||
|
||||
|
|
|
@ -187,59 +187,12 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
|
|||
|
||||
static struct dentry *blk_tree_root;
|
||||
static DEFINE_MUTEX(blk_tree_mutex);
|
||||
static unsigned int root_users;
|
||||
|
||||
static inline void blk_remove_root(void)
|
||||
{
|
||||
if (blk_tree_root) {
|
||||
debugfs_remove(blk_tree_root);
|
||||
blk_tree_root = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
static void blk_remove_tree(struct dentry *dir)
|
||||
{
|
||||
mutex_lock(&blk_tree_mutex);
|
||||
debugfs_remove(dir);
|
||||
if (--root_users == 0)
|
||||
blk_remove_root();
|
||||
mutex_unlock(&blk_tree_mutex);
|
||||
}
|
||||
|
||||
static struct dentry *blk_create_tree(const char *blk_name)
|
||||
{
|
||||
struct dentry *dir = NULL;
|
||||
int created = 0;
|
||||
|
||||
mutex_lock(&blk_tree_mutex);
|
||||
|
||||
if (!blk_tree_root) {
|
||||
blk_tree_root = debugfs_create_dir("block", NULL);
|
||||
if (!blk_tree_root)
|
||||
goto err;
|
||||
created = 1;
|
||||
}
|
||||
|
||||
dir = debugfs_create_dir(blk_name, blk_tree_root);
|
||||
if (dir)
|
||||
root_users++;
|
||||
else {
|
||||
/* Delete root only if we created it */
|
||||
if (created)
|
||||
blk_remove_root();
|
||||
}
|
||||
|
||||
err:
|
||||
mutex_unlock(&blk_tree_mutex);
|
||||
return dir;
|
||||
}
|
||||
|
||||
static void blk_trace_cleanup(struct blk_trace *bt)
|
||||
{
|
||||
relay_close(bt->rchan);
|
||||
debugfs_remove(bt->msg_file);
|
||||
debugfs_remove(bt->dropped_file);
|
||||
blk_remove_tree(bt->dir);
|
||||
relay_close(bt->rchan);
|
||||
free_percpu(bt->sequence);
|
||||
free_percpu(bt->msg_data);
|
||||
kfree(bt);
|
||||
|
@ -346,7 +299,18 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
|
|||
|
||||
static int blk_remove_buf_file_callback(struct dentry *dentry)
|
||||
{
|
||||
struct dentry *parent = dentry->d_parent;
|
||||
debugfs_remove(dentry);
|
||||
|
||||
/*
|
||||
* this will fail for all but the last file, but that is ok. what we
|
||||
* care about is the top level buts->name directory going away, when
|
||||
* the last trace file is gone. Then we don't have to rmdir() that
|
||||
* manually on trace stop, so it nicely solves the issue with
|
||||
* force killing of running traces.
|
||||
*/
|
||||
|
||||
debugfs_remove(parent);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -404,7 +368,15 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
|
|||
goto err;
|
||||
|
||||
ret = -ENOENT;
|
||||
dir = blk_create_tree(buts->name);
|
||||
|
||||
if (!blk_tree_root) {
|
||||
blk_tree_root = debugfs_create_dir("block", NULL);
|
||||
if (!blk_tree_root)
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
dir = debugfs_create_dir(buts->name, blk_tree_root);
|
||||
|
||||
if (!dir)
|
||||
goto err;
|
||||
|
||||
|
@ -458,8 +430,6 @@ probe_err:
|
|||
atomic_dec(&blk_probes_ref);
|
||||
mutex_unlock(&blk_probe_mutex);
|
||||
err:
|
||||
if (dir)
|
||||
blk_remove_tree(dir);
|
||||
if (bt) {
|
||||
if (bt->msg_file)
|
||||
debugfs_remove(bt->msg_file);
|
||||
|
|
|
@ -84,6 +84,11 @@ struct cfq_data {
|
|||
*/
|
||||
struct cfq_rb_root service_tree;
|
||||
unsigned int busy_queues;
|
||||
/*
|
||||
* Used to track any pending rt requests so we can pre-empt current
|
||||
* non-RT cfqq in service when this value is non-zero.
|
||||
*/
|
||||
unsigned int busy_rt_queues;
|
||||
|
||||
int rq_in_driver;
|
||||
int sync_flight;
|
||||
|
@ -562,6 +567,8 @@ static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
|
|||
BUG_ON(cfq_cfqq_on_rr(cfqq));
|
||||
cfq_mark_cfqq_on_rr(cfqq);
|
||||
cfqd->busy_queues++;
|
||||
if (cfq_class_rt(cfqq))
|
||||
cfqd->busy_rt_queues++;
|
||||
|
||||
cfq_resort_rr_list(cfqd, cfqq);
|
||||
}
|
||||
|
@ -581,6 +588,8 @@ static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
|
|||
|
||||
BUG_ON(!cfqd->busy_queues);
|
||||
cfqd->busy_queues--;
|
||||
if (cfq_class_rt(cfqq))
|
||||
cfqd->busy_rt_queues--;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -1004,6 +1013,20 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
|
|||
if (cfq_slice_used(cfqq))
|
||||
goto expire;
|
||||
|
||||
/*
|
||||
* If we have a RT cfqq waiting, then we pre-empt the current non-rt
|
||||
* cfqq.
|
||||
*/
|
||||
if (!cfq_class_rt(cfqq) && cfqd->busy_rt_queues) {
|
||||
/*
|
||||
* We simulate this as cfqq timed out so that it gets to bank
|
||||
* the remaining of its time slice.
|
||||
*/
|
||||
cfq_log_cfqq(cfqd, cfqq, "preempt");
|
||||
cfq_slice_expired(cfqd, 1);
|
||||
goto new_queue;
|
||||
}
|
||||
|
||||
/*
|
||||
* The active queue has requests and isn't expired, allow it to
|
||||
* dispatch.
|
||||
|
@ -1067,6 +1090,13 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
|
|||
if (RB_EMPTY_ROOT(&cfqq->sort_list))
|
||||
break;
|
||||
|
||||
/*
|
||||
* If there is a non-empty RT cfqq waiting for current
|
||||
* cfqq's timeslice to complete, pre-empt this cfqq
|
||||
*/
|
||||
if (!cfq_class_rt(cfqq) && cfqd->busy_rt_queues)
|
||||
break;
|
||||
|
||||
} while (dispatched < max_dispatch);
|
||||
|
||||
/*
|
||||
|
@ -1801,6 +1831,12 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
|
|||
if (rq_is_meta(rq) && !cfqq->meta_pending)
|
||||
return 1;
|
||||
|
||||
/*
|
||||
* Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
|
||||
*/
|
||||
if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
|
||||
return 1;
|
||||
|
||||
if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
|
||||
return 0;
|
||||
|
||||
|
@ -1870,7 +1906,8 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
|
|||
/*
|
||||
* not the active queue - expire current slice if it is
|
||||
* idle and has expired it's mean thinktime or this new queue
|
||||
* has some old slice time left and is of higher priority
|
||||
* has some old slice time left and is of higher priority or
|
||||
* this new queue is RT and the current one is BE
|
||||
*/
|
||||
cfq_preempt_queue(cfqd, cfqq);
|
||||
cfq_mark_cfqq_must_dispatch(cfqq);
|
||||
|
|
|
@ -31,7 +31,7 @@
|
|||
|
||||
char e1000_driver_name[] = "e1000";
|
||||
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
|
||||
#define DRV_VERSION "7.3.20-k3-NAPI"
|
||||
#define DRV_VERSION "7.3.21-k3-NAPI"
|
||||
const char e1000_driver_version[] = DRV_VERSION;
|
||||
static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
|
||||
|
||||
|
@ -3712,7 +3712,7 @@ static irqreturn_t e1000_intr(int irq, void *data)
|
|||
struct e1000_hw *hw = &adapter->hw;
|
||||
u32 rctl, icr = er32(ICR);
|
||||
|
||||
if (unlikely(!icr))
|
||||
if (unlikely((!icr) || test_bit(__E1000_RESETTING, &adapter->flags)))
|
||||
return IRQ_NONE; /* Not our interrupt */
|
||||
|
||||
/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
|
||||
|
|
|
@ -234,6 +234,8 @@ static int gfar_mdio_probe(struct of_device *ofdev,
|
|||
if (NULL == new_bus)
|
||||
return -ENOMEM;
|
||||
|
||||
device_init_wakeup(&ofdev->dev, 1);
|
||||
|
||||
new_bus->name = "Gianfar MII Bus",
|
||||
new_bus->read = &gfar_mdio_read,
|
||||
new_bus->write = &gfar_mdio_write,
|
||||
|
|
|
@ -210,7 +210,7 @@
|
|||
#define MAX_CMD_DESCRIPTORS_HOST 1024
|
||||
#define MAX_RCV_DESCRIPTORS_1G 2048
|
||||
#define MAX_RCV_DESCRIPTORS_10G 4096
|
||||
#define MAX_JUMBO_RCV_DESCRIPTORS 512
|
||||
#define MAX_JUMBO_RCV_DESCRIPTORS 1024
|
||||
#define MAX_LRO_RCV_DESCRIPTORS 8
|
||||
#define MAX_RCVSTATUS_DESCRIPTORS MAX_RCV_DESCRIPTORS
|
||||
#define MAX_JUMBO_RCV_DESC MAX_JUMBO_RCV_DESCRIPTORS
|
||||
|
|
|
@ -947,8 +947,10 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
|
|||
}
|
||||
for (i = 0; i < n; i++) {
|
||||
if (netxen_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 ||
|
||||
netxen_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0)
|
||||
netxen_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0) {
|
||||
kfree(buf);
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
buf[i].addr = addr;
|
||||
buf[i].data = val;
|
||||
|
|
|
@ -438,7 +438,6 @@ static void r6040_down(struct net_device *dev)
|
|||
{
|
||||
struct r6040_private *lp = netdev_priv(dev);
|
||||
void __iomem *ioaddr = lp->base;
|
||||
struct pci_dev *pdev = lp->pdev;
|
||||
int limit = 2048;
|
||||
u16 *adrp;
|
||||
u16 cmd;
|
||||
|
|
|
@ -1003,9 +1003,9 @@ static int skfp_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
|
|||
break;
|
||||
case SKFP_CLR_STATS: /* Zero out the driver statistics */
|
||||
if (!capable(CAP_NET_ADMIN)) {
|
||||
memset(&lp->MacStat, 0, sizeof(lp->MacStat));
|
||||
} else {
|
||||
status = -EPERM;
|
||||
} else {
|
||||
memset(&lp->MacStat, 0, sizeof(lp->MacStat));
|
||||
}
|
||||
break;
|
||||
default:
|
||||
|
|
|
@ -953,7 +953,7 @@ smsc911x_rx_fastforward(struct smsc911x_data *pdata, unsigned int pktbytes)
|
|||
do {
|
||||
udelay(1);
|
||||
val = smsc911x_reg_read(pdata, RX_DP_CTRL);
|
||||
} while (timeout-- && (val & RX_DP_CTRL_RX_FFWD_));
|
||||
} while (--timeout && (val & RX_DP_CTRL_RX_FFWD_));
|
||||
|
||||
if (unlikely(timeout == 0))
|
||||
SMSC_WARNING(HW, "Timed out waiting for "
|
||||
|
|
|
@ -1378,6 +1378,7 @@ static int smsc9420_open(struct net_device *dev)
|
|||
|
||||
/* test the IRQ connection to the ISR */
|
||||
smsc_dbg(IFUP, "Testing ISR using IRQ %d", dev->irq);
|
||||
pd->software_irq_signal = false;
|
||||
|
||||
spin_lock_irqsave(&pd->int_lock, flags);
|
||||
/* configure interrupt deassertion timer and enable interrupts */
|
||||
|
@ -1393,8 +1394,6 @@ static int smsc9420_open(struct net_device *dev)
|
|||
smsc9420_pci_flush_write(pd);
|
||||
|
||||
timeout = 1000;
|
||||
pd->software_irq_signal = false;
|
||||
smp_wmb();
|
||||
while (timeout--) {
|
||||
if (pd->software_irq_signal)
|
||||
break;
|
||||
|
|
|
@ -9,6 +9,11 @@
|
|||
|
||||
Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
|
||||
for more information on this driver.
|
||||
|
||||
DC21143 manual "21143 PCI/CardBus 10/100Mb/s Ethernet LAN Controller
|
||||
Hardware Reference Manual" is currently available at :
|
||||
http://developer.intel.com/design/network/manuals/278074.htm
|
||||
|
||||
Please submit bugs to http://bugzilla.kernel.org/ .
|
||||
*/
|
||||
|
||||
|
@ -32,7 +37,11 @@ void t21142_media_task(struct work_struct *work)
|
|||
int csr12 = ioread32(ioaddr + CSR12);
|
||||
int next_tick = 60*HZ;
|
||||
int new_csr6 = 0;
|
||||
int csr14 = ioread32(ioaddr + CSR14);
|
||||
|
||||
/* CSR12[LS10,LS100] are not reliable during autonegotiation */
|
||||
if ((csr14 & 0x80) && (csr12 & 0x7000) != 0x5000)
|
||||
csr12 |= 6;
|
||||
if (tulip_debug > 2)
|
||||
printk(KERN_INFO"%s: 21143 negotiation status %8.8x, %s.\n",
|
||||
dev->name, csr12, medianame[dev->if_port]);
|
||||
|
@ -76,7 +85,7 @@ void t21142_media_task(struct work_struct *work)
|
|||
new_csr6 = 0x83860000;
|
||||
dev->if_port = 3;
|
||||
iowrite32(0, ioaddr + CSR13);
|
||||
iowrite32(0x0003FF7F, ioaddr + CSR14);
|
||||
iowrite32(0x0003FFFF, ioaddr + CSR14);
|
||||
iowrite16(8, ioaddr + CSR15);
|
||||
iowrite32(1, ioaddr + CSR13);
|
||||
}
|
||||
|
@ -132,10 +141,14 @@ void t21142_lnk_change(struct net_device *dev, int csr5)
|
|||
struct tulip_private *tp = netdev_priv(dev);
|
||||
void __iomem *ioaddr = tp->base_addr;
|
||||
int csr12 = ioread32(ioaddr + CSR12);
|
||||
int csr14 = ioread32(ioaddr + CSR14);
|
||||
|
||||
/* CSR12[LS10,LS100] are not reliable during autonegotiation */
|
||||
if ((csr14 & 0x80) && (csr12 & 0x7000) != 0x5000)
|
||||
csr12 |= 6;
|
||||
if (tulip_debug > 1)
|
||||
printk(KERN_INFO"%s: 21143 link status interrupt %8.8x, CSR5 %x, "
|
||||
"%8.8x.\n", dev->name, csr12, csr5, ioread32(ioaddr + CSR14));
|
||||
"%8.8x.\n", dev->name, csr12, csr5, csr14);
|
||||
|
||||
/* If NWay finished and we have a negotiated partner capability. */
|
||||
if (tp->nway && !tp->nwayset && (csr12 & 0x7000) == 0x5000) {
|
||||
|
@ -143,7 +156,9 @@ void t21142_lnk_change(struct net_device *dev, int csr5)
|
|||
int negotiated = tp->sym_advertise & (csr12 >> 16);
|
||||
tp->lpar = csr12 >> 16;
|
||||
tp->nwayset = 1;
|
||||
if (negotiated & 0x0100) dev->if_port = 5;
|
||||
/* If partner cannot negotiate, it is 10Mbps Half Duplex */
|
||||
if (!(csr12 & 0x8000)) dev->if_port = 0;
|
||||
else if (negotiated & 0x0100) dev->if_port = 5;
|
||||
else if (negotiated & 0x0080) dev->if_port = 3;
|
||||
else if (negotiated & 0x0040) dev->if_port = 4;
|
||||
else if (negotiated & 0x0020) dev->if_port = 0;
|
||||
|
@ -214,7 +229,7 @@ void t21142_lnk_change(struct net_device *dev, int csr5)
|
|||
tp->timer.expires = RUN_AT(3*HZ);
|
||||
add_timer(&tp->timer);
|
||||
} else if (dev->if_port == 5)
|
||||
iowrite32(ioread32(ioaddr + CSR14) & ~0x080, ioaddr + CSR14);
|
||||
iowrite32(csr14 & ~0x080, ioaddr + CSR14);
|
||||
} else if (dev->if_port == 0 || dev->if_port == 4) {
|
||||
if ((csr12 & 4) == 0)
|
||||
printk(KERN_INFO"%s: 21143 10baseT link beat good.\n",
|
||||
|
|
|
@ -1536,6 +1536,11 @@ static void adjust_link(struct net_device *dev)
|
|||
static int init_phy(struct net_device *dev)
|
||||
{
|
||||
struct ucc_geth_private *priv = netdev_priv(dev);
|
||||
struct device_node *np = priv->node;
|
||||
struct device_node *phy, *mdio;
|
||||
const phandle *ph;
|
||||
char bus_name[MII_BUS_ID_SIZE];
|
||||
const unsigned int *id;
|
||||
struct phy_device *phydev;
|
||||
char phy_id[BUS_ID_SIZE];
|
||||
|
||||
|
@ -1543,8 +1548,18 @@ static int init_phy(struct net_device *dev)
|
|||
priv->oldspeed = 0;
|
||||
priv->oldduplex = -1;
|
||||
|
||||
snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, priv->ug_info->mdio_bus,
|
||||
priv->ug_info->phy_address);
|
||||
ph = of_get_property(np, "phy-handle", NULL);
|
||||
phy = of_find_node_by_phandle(*ph);
|
||||
mdio = of_get_parent(phy);
|
||||
|
||||
id = of_get_property(phy, "reg", NULL);
|
||||
|
||||
of_node_put(phy);
|
||||
of_node_put(mdio);
|
||||
|
||||
uec_mdio_bus_name(bus_name, mdio);
|
||||
snprintf(phy_id, sizeof(phy_id), "%s:%02x",
|
||||
bus_name, *id);
|
||||
|
||||
phydev = phy_connect(dev, phy_id, &adjust_link, 0, priv->phy_interface);
|
||||
|
||||
|
@ -3748,6 +3763,7 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
|
|||
|
||||
ugeth->ug_info = ug_info;
|
||||
ugeth->dev = dev;
|
||||
ugeth->node = np;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -1186,6 +1186,8 @@ struct ucc_geth_private {
|
|||
int oldspeed;
|
||||
int oldduplex;
|
||||
int oldlink;
|
||||
|
||||
struct device_node *node;
|
||||
};
|
||||
|
||||
void uec_set_ethtool_ops(struct net_device *netdev);
|
||||
|
|
|
@ -156,7 +156,7 @@ static int uec_mdio_probe(struct of_device *ofdev, const struct of_device_id *ma
|
|||
if (err)
|
||||
goto reg_map_fail;
|
||||
|
||||
snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", res.start);
|
||||
uec_mdio_bus_name(new_bus->id, np);
|
||||
|
||||
new_bus->irq = kmalloc(32 * sizeof(int), GFP_KERNEL);
|
||||
|
||||
|
@ -283,3 +283,13 @@ void uec_mdio_exit(void)
|
|||
{
|
||||
of_unregister_platform_driver(&uec_mdio_driver);
|
||||
}
|
||||
|
||||
void uec_mdio_bus_name(char *name, struct device_node *np)
|
||||
{
|
||||
const u32 *reg;
|
||||
|
||||
reg = of_get_property(np, "reg", NULL);
|
||||
|
||||
snprintf(name, MII_BUS_ID_SIZE, "%s@%x", np->name, reg ? *reg : 0);
|
||||
}
|
||||
|
||||
|
|
|
@ -97,4 +97,5 @@ int uec_mdio_read(struct mii_bus *bus, int mii_id, int regnum);
|
|||
int uec_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value);
|
||||
int __init uec_mdio_init(void);
|
||||
void uec_mdio_exit(void);
|
||||
void uec_mdio_bus_name(char *name, struct device_node *np);
|
||||
#endif /* __UEC_MII_H */
|
||||
|
|
|
@ -287,7 +287,7 @@ static void try_fill_recv_maxbufs(struct virtnet_info *vi)
|
|||
skb_put(skb, MAX_PACKET_LEN);
|
||||
|
||||
hdr = skb_vnet_hdr(skb);
|
||||
sg_init_one(sg, hdr, sizeof(*hdr));
|
||||
sg_set_buf(sg, hdr, sizeof(*hdr));
|
||||
|
||||
if (vi->big_packets) {
|
||||
for (i = 0; i < MAX_SKB_FRAGS; i++) {
|
||||
|
@ -488,9 +488,9 @@ static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
|
|||
|
||||
/* Encode metadata header at front. */
|
||||
if (vi->mergeable_rx_bufs)
|
||||
sg_init_one(sg, mhdr, sizeof(*mhdr));
|
||||
sg_set_buf(sg, mhdr, sizeof(*mhdr));
|
||||
else
|
||||
sg_init_one(sg, hdr, sizeof(*hdr));
|
||||
sg_set_buf(sg, hdr, sizeof(*hdr));
|
||||
|
||||
num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
|
||||
|
||||
|
|
|
@ -234,20 +234,6 @@ struct dentry *debugfs_create_i2400m_reset(
|
|||
&fops_i2400m_reset);
|
||||
}
|
||||
|
||||
/*
|
||||
* Debug levels control; see debug.h
|
||||
*/
|
||||
struct d_level D_LEVEL[] = {
|
||||
D_SUBMODULE_DEFINE(control),
|
||||
D_SUBMODULE_DEFINE(driver),
|
||||
D_SUBMODULE_DEFINE(debugfs),
|
||||
D_SUBMODULE_DEFINE(fw),
|
||||
D_SUBMODULE_DEFINE(netdev),
|
||||
D_SUBMODULE_DEFINE(rfkill),
|
||||
D_SUBMODULE_DEFINE(rx),
|
||||
D_SUBMODULE_DEFINE(tx),
|
||||
};
|
||||
size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);
|
||||
|
||||
#define __debugfs_register(prefix, name, parent) \
|
||||
do { \
|
||||
|
|
|
@ -707,6 +707,22 @@ void i2400m_release(struct i2400m *i2400m)
|
|||
EXPORT_SYMBOL_GPL(i2400m_release);
|
||||
|
||||
|
||||
/*
|
||||
* Debug levels control; see debug.h
|
||||
*/
|
||||
struct d_level D_LEVEL[] = {
|
||||
D_SUBMODULE_DEFINE(control),
|
||||
D_SUBMODULE_DEFINE(driver),
|
||||
D_SUBMODULE_DEFINE(debugfs),
|
||||
D_SUBMODULE_DEFINE(fw),
|
||||
D_SUBMODULE_DEFINE(netdev),
|
||||
D_SUBMODULE_DEFINE(rfkill),
|
||||
D_SUBMODULE_DEFINE(rx),
|
||||
D_SUBMODULE_DEFINE(tx),
|
||||
};
|
||||
size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);
|
||||
|
||||
|
||||
static
|
||||
int __init i2400m_driver_init(void)
|
||||
{
|
||||
|
|
|
@ -1028,6 +1028,8 @@ ath5k_setup_bands(struct ieee80211_hw *hw)
|
|||
* it's done by reseting the chip. To accomplish this we must
|
||||
* first cleanup any pending DMA, then restart stuff after a la
|
||||
* ath5k_init.
|
||||
*
|
||||
* Called with sc->lock.
|
||||
*/
|
||||
static int
|
||||
ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan)
|
||||
|
@ -2814,11 +2816,17 @@ ath5k_config(struct ieee80211_hw *hw, u32 changed)
|
|||
{
|
||||
struct ath5k_softc *sc = hw->priv;
|
||||
struct ieee80211_conf *conf = &hw->conf;
|
||||
int ret;
|
||||
|
||||
mutex_lock(&sc->lock);
|
||||
|
||||
sc->bintval = conf->beacon_int;
|
||||
sc->power_level = conf->power_level;
|
||||
|
||||
return ath5k_chan_set(sc, conf->channel);
|
||||
ret = ath5k_chan_set(sc, conf->channel);
|
||||
|
||||
mutex_unlock(&sc->lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int
|
||||
|
|
|
@ -1719,6 +1719,10 @@ static int iwl_read_ucode(struct iwl_priv *priv)
|
|||
priv->ucode_data_backup.len = data_size;
|
||||
iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
|
||||
|
||||
if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr ||
|
||||
!priv->ucode_data_backup.v_addr)
|
||||
goto err_pci_alloc;
|
||||
|
||||
/* Initialization instructions and data */
|
||||
if (init_size && init_data_size) {
|
||||
priv->ucode_init.len = init_size;
|
||||
|
|
|
@ -285,7 +285,10 @@ static void rtl8225_rf_set_tx_power(struct ieee80211_hw *dev, int channel)
|
|||
ofdm_power = priv->channels[channel - 1].hw_value >> 4;
|
||||
|
||||
cck_power = min(cck_power, (u8)11);
|
||||
ofdm_power = min(ofdm_power, (u8)35);
|
||||
if (ofdm_power > (u8)15)
|
||||
ofdm_power = 25;
|
||||
else
|
||||
ofdm_power += 10;
|
||||
|
||||
rtl818x_iowrite8(priv, &priv->map->TX_GAIN_CCK,
|
||||
rtl8225_tx_gain_cck_ofdm[cck_power / 6] >> 1);
|
||||
|
@ -536,7 +539,10 @@ static void rtl8225z2_rf_set_tx_power(struct ieee80211_hw *dev, int channel)
|
|||
cck_power += priv->txpwr_base & 0xF;
|
||||
cck_power = min(cck_power, (u8)35);
|
||||
|
||||
ofdm_power = min(ofdm_power, (u8)15);
|
||||
if (ofdm_power > (u8)15)
|
||||
ofdm_power = 25;
|
||||
else
|
||||
ofdm_power += 10;
|
||||
ofdm_power += priv->txpwr_base >> 4;
|
||||
ofdm_power = min(ofdm_power, (u8)35);
|
||||
|
||||
|
|
|
@ -161,6 +161,11 @@ static void jsm_tty_stop_rx(struct uart_port *port)
|
|||
channel->ch_bd->bd_ops->disable_receiver(channel);
|
||||
}
|
||||
|
||||
static void jsm_tty_enable_ms(struct uart_port *port)
|
||||
{
|
||||
/* Nothing needed */
|
||||
}
|
||||
|
||||
static void jsm_tty_break(struct uart_port *port, int break_state)
|
||||
{
|
||||
unsigned long lock_flags;
|
||||
|
@ -345,6 +350,7 @@ static struct uart_ops jsm_ops = {
|
|||
.start_tx = jsm_tty_start_tx,
|
||||
.send_xchar = jsm_tty_send_xchar,
|
||||
.stop_rx = jsm_tty_stop_rx,
|
||||
.enable_ms = jsm_tty_enable_ms,
|
||||
.break_ctl = jsm_tty_break,
|
||||
.startup = jsm_tty_open,
|
||||
.shutdown = jsm_tty_close,
|
||||
|
|
|
@ -140,7 +140,6 @@ int bio_integrity_add_page(struct bio *bio, struct page *page,
|
|||
|
||||
iv = bip_vec_idx(bip, bip->bip_vcnt);
|
||||
BUG_ON(iv == NULL);
|
||||
BUG_ON(iv->bv_page != NULL);
|
||||
|
||||
iv->bv_page = page;
|
||||
iv->bv_len = len;
|
||||
|
@ -465,7 +464,7 @@ static int bio_integrity_verify(struct bio *bio)
|
|||
|
||||
if (ret) {
|
||||
kunmap_atomic(kaddr, KM_USER0);
|
||||
break;
|
||||
return ret;
|
||||
}
|
||||
|
||||
sectors = bv->bv_len / bi->sector_size;
|
||||
|
@ -493,18 +492,13 @@ static void bio_integrity_verify_fn(struct work_struct *work)
|
|||
struct bio_integrity_payload *bip =
|
||||
container_of(work, struct bio_integrity_payload, bip_work);
|
||||
struct bio *bio = bip->bip_bio;
|
||||
int error = bip->bip_error;
|
||||
int error;
|
||||
|
||||
if (bio_integrity_verify(bio)) {
|
||||
clear_bit(BIO_UPTODATE, &bio->bi_flags);
|
||||
error = -EIO;
|
||||
}
|
||||
error = bio_integrity_verify(bio);
|
||||
|
||||
/* Restore original bio completion handler */
|
||||
bio->bi_end_io = bip->bip_end_io;
|
||||
|
||||
if (bio->bi_end_io)
|
||||
bio->bi_end_io(bio, error);
|
||||
bio_endio(bio, error);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -525,7 +519,17 @@ void bio_integrity_endio(struct bio *bio, int error)
|
|||
|
||||
BUG_ON(bip->bip_bio != bio);
|
||||
|
||||
bip->bip_error = error;
|
||||
/* In case of an I/O error there is no point in verifying the
|
||||
* integrity metadata. Restore original bio end_io handler
|
||||
* and run it.
|
||||
*/
|
||||
if (error) {
|
||||
bio->bi_end_io = bip->bip_end_io;
|
||||
bio_endio(bio, error);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
INIT_WORK(&bip->bip_work, bio_integrity_verify_fn);
|
||||
queue_work(kintegrityd_wq, &bip->bip_work);
|
||||
}
|
||||
|
|
|
@ -538,6 +538,7 @@ static int dev_ifsioc(unsigned int fd, unsigned int cmd, unsigned long arg)
|
|||
* cannot be fixed without breaking all existing apps.
|
||||
*/
|
||||
case TUNSETIFF:
|
||||
case TUNGETIFF:
|
||||
case SIOCGIFFLAGS:
|
||||
case SIOCGIFMETRIC:
|
||||
case SIOCGIFMTU:
|
||||
|
@ -1982,6 +1983,11 @@ COMPATIBLE_IOCTL(TUNSETNOCSUM)
|
|||
COMPATIBLE_IOCTL(TUNSETDEBUG)
|
||||
COMPATIBLE_IOCTL(TUNSETPERSIST)
|
||||
COMPATIBLE_IOCTL(TUNSETOWNER)
|
||||
COMPATIBLE_IOCTL(TUNSETLINK)
|
||||
COMPATIBLE_IOCTL(TUNSETGROUP)
|
||||
COMPATIBLE_IOCTL(TUNGETFEATURES)
|
||||
COMPATIBLE_IOCTL(TUNSETOFFLOAD)
|
||||
COMPATIBLE_IOCTL(TUNSETTXFILTER)
|
||||
/* Big V */
|
||||
COMPATIBLE_IOCTL(VT_SETMODE)
|
||||
COMPATIBLE_IOCTL(VT_GETMODE)
|
||||
|
@ -2573,6 +2579,7 @@ HANDLE_IOCTL(SIOCGIFPFLAGS, dev_ifsioc)
|
|||
HANDLE_IOCTL(SIOCGIFTXQLEN, dev_ifsioc)
|
||||
HANDLE_IOCTL(SIOCSIFTXQLEN, dev_ifsioc)
|
||||
HANDLE_IOCTL(TUNSETIFF, dev_ifsioc)
|
||||
HANDLE_IOCTL(TUNGETIFF, dev_ifsioc)
|
||||
HANDLE_IOCTL(SIOCETHTOOL, ethtool_ioctl)
|
||||
HANDLE_IOCTL(SIOCBONDENSLAVE, bond_ioctl)
|
||||
HANDLE_IOCTL(SIOCBONDRELEASE, bond_ioctl)
|
||||
|
|
|
@ -1358,7 +1358,7 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
|
|||
struct fake_dirent *fde;
|
||||
|
||||
blocksize = dir->i_sb->s_blocksize;
|
||||
dxtrace(printk("Creating index\n"));
|
||||
dxtrace(printk(KERN_DEBUG "Creating index: inode %lu\n", dir->i_ino));
|
||||
retval = ext3_journal_get_write_access(handle, bh);
|
||||
if (retval) {
|
||||
ext3_std_error(dir->i_sb, retval);
|
||||
|
@ -1367,6 +1367,19 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
|
|||
}
|
||||
root = (struct dx_root *) bh->b_data;
|
||||
|
||||
/* The 0th block becomes the root, move the dirents out */
|
||||
fde = &root->dotdot;
|
||||
de = (struct ext3_dir_entry_2 *)((char *)fde +
|
||||
ext3_rec_len_from_disk(fde->rec_len));
|
||||
if ((char *) de >= (((char *) root) + blocksize)) {
|
||||
ext3_error(dir->i_sb, __func__,
|
||||
"invalid rec_len for '..' in inode %lu",
|
||||
dir->i_ino);
|
||||
brelse(bh);
|
||||
return -EIO;
|
||||
}
|
||||
len = ((char *) root) + blocksize - (char *) de;
|
||||
|
||||
bh2 = ext3_append (handle, dir, &block, &retval);
|
||||
if (!(bh2)) {
|
||||
brelse(bh);
|
||||
|
@ -1375,11 +1388,6 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
|
|||
EXT3_I(dir)->i_flags |= EXT3_INDEX_FL;
|
||||
data1 = bh2->b_data;
|
||||
|
||||
/* The 0th block becomes the root, move the dirents out */
|
||||
fde = &root->dotdot;
|
||||
de = (struct ext3_dir_entry_2 *)((char *)fde +
|
||||
ext3_rec_len_from_disk(fde->rec_len));
|
||||
len = ((char *) root) + blocksize - (char *) de;
|
||||
memcpy (data1, de, len);
|
||||
de = (struct ext3_dir_entry_2 *) data1;
|
||||
top = data1 + len;
|
||||
|
|
|
@ -684,15 +684,15 @@ ext4_fsblk_t ext4_count_free_blocks(struct super_block *sb)
|
|||
gdp = ext4_get_group_desc(sb, i, NULL);
|
||||
if (!gdp)
|
||||
continue;
|
||||
desc_count += le16_to_cpu(gdp->bg_free_blocks_count);
|
||||
desc_count += ext4_free_blks_count(sb, gdp);
|
||||
brelse(bitmap_bh);
|
||||
bitmap_bh = ext4_read_block_bitmap(sb, i);
|
||||
if (bitmap_bh == NULL)
|
||||
continue;
|
||||
|
||||
x = ext4_count_free(bitmap_bh, sb->s_blocksize);
|
||||
printk(KERN_DEBUG "group %lu: stored = %d, counted = %u\n",
|
||||
i, le16_to_cpu(gdp->bg_free_blocks_count), x);
|
||||
printk(KERN_DEBUG "group %u: stored = %d, counted = %u\n",
|
||||
i, ext4_free_blks_count(sb, gdp), x);
|
||||
bitmap_count += x;
|
||||
}
|
||||
brelse(bitmap_bh);
|
||||
|
|
|
@ -1206,8 +1206,11 @@ static inline void ext4_r_blocks_count_set(struct ext4_super_block *es,
|
|||
|
||||
static inline loff_t ext4_isize(struct ext4_inode *raw_inode)
|
||||
{
|
||||
return ((loff_t)le32_to_cpu(raw_inode->i_size_high) << 32) |
|
||||
le32_to_cpu(raw_inode->i_size_lo);
|
||||
if (S_ISREG(le16_to_cpu(raw_inode->i_mode)))
|
||||
return ((loff_t)le32_to_cpu(raw_inode->i_size_high) << 32) |
|
||||
le32_to_cpu(raw_inode->i_size_lo);
|
||||
else
|
||||
return (loff_t) le32_to_cpu(raw_inode->i_size_lo);
|
||||
}
|
||||
|
||||
static inline void ext4_isize_set(struct ext4_inode *raw_inode, loff_t i_size)
|
||||
|
|
|
@ -3048,7 +3048,7 @@ retry:
|
|||
WARN_ON(ret <= 0);
|
||||
printk(KERN_ERR "%s: ext4_ext_get_blocks "
|
||||
"returned error inode#%lu, block=%u, "
|
||||
"max_blocks=%lu", __func__,
|
||||
"max_blocks=%u", __func__,
|
||||
inode->i_ino, block, max_blocks);
|
||||
#endif
|
||||
ext4_mark_inode_dirty(handle, inode);
|
||||
|
|
|
@ -360,9 +360,9 @@ static int ext4_block_to_path(struct inode *inode,
|
|||
final = ptrs;
|
||||
} else {
|
||||
ext4_warning(inode->i_sb, "ext4_block_to_path",
|
||||
"block %lu > max",
|
||||
"block %lu > max in inode %lu",
|
||||
i_block + direct_blocks +
|
||||
indirect_blocks + double_blocks);
|
||||
indirect_blocks + double_blocks, inode->i_ino);
|
||||
}
|
||||
if (boundary)
|
||||
*boundary = final - 1 - (i_block & (ptrs - 1));
|
||||
|
@ -2821,9 +2821,6 @@ static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
|
|||
filemap_write_and_wait(mapping);
|
||||
}
|
||||
|
||||
BUG_ON(!EXT4_JOURNAL(inode) &&
|
||||
EXT4_I(inode)->i_state & EXT4_STATE_JDATA);
|
||||
|
||||
if (EXT4_JOURNAL(inode) && EXT4_I(inode)->i_state & EXT4_STATE_JDATA) {
|
||||
/*
|
||||
* This is a REALLY heavyweight approach, but the use of
|
||||
|
@ -3622,7 +3619,7 @@ static void ext4_free_data(handle_t *handle, struct inode *inode,
|
|||
* block pointed to itself, it would have been detached when
|
||||
* the block was cleared. Check for this instead of OOPSing.
|
||||
*/
|
||||
if (bh2jh(this_bh))
|
||||
if ((EXT4_JOURNAL(inode) == NULL) || bh2jh(this_bh))
|
||||
ext4_handle_dirty_metadata(handle, inode, this_bh);
|
||||
else
|
||||
ext4_error(inode->i_sb, __func__,
|
||||
|
|
|
@ -3025,7 +3025,7 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
|
|||
goto out_err;
|
||||
|
||||
ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group,
|
||||
gdp->bg_free_blocks_count);
|
||||
ext4_free_blks_count(sb, gdp));
|
||||
|
||||
err = ext4_journal_get_write_access(handle, gdp_bh);
|
||||
if (err)
|
||||
|
|
|
@ -1368,7 +1368,7 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
|
|||
struct fake_dirent *fde;
|
||||
|
||||
blocksize = dir->i_sb->s_blocksize;
|
||||
dxtrace(printk(KERN_DEBUG "Creating index\n"));
|
||||
dxtrace(printk(KERN_DEBUG "Creating index: inode %lu\n", dir->i_ino));
|
||||
retval = ext4_journal_get_write_access(handle, bh);
|
||||
if (retval) {
|
||||
ext4_std_error(dir->i_sb, retval);
|
||||
|
@ -1377,6 +1377,20 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
|
|||
}
|
||||
root = (struct dx_root *) bh->b_data;
|
||||
|
||||
/* The 0th block becomes the root, move the dirents out */
|
||||
fde = &root->dotdot;
|
||||
de = (struct ext4_dir_entry_2 *)((char *)fde +
|
||||
ext4_rec_len_from_disk(fde->rec_len));
|
||||
if ((char *) de >= (((char *) root) + blocksize)) {
|
||||
ext4_error(dir->i_sb, __func__,
|
||||
"invalid rec_len for '..' in inode %lu",
|
||||
dir->i_ino);
|
||||
brelse(bh);
|
||||
return -EIO;
|
||||
}
|
||||
len = ((char *) root) + blocksize - (char *) de;
|
||||
|
||||
/* Allocate new block for the 0th block's dirents */
|
||||
bh2 = ext4_append(handle, dir, &block, &retval);
|
||||
if (!(bh2)) {
|
||||
brelse(bh);
|
||||
|
@ -1385,11 +1399,6 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
|
|||
EXT4_I(dir)->i_flags |= EXT4_INDEX_FL;
|
||||
data1 = bh2->b_data;
|
||||
|
||||
/* The 0th block becomes the root, move the dirents out */
|
||||
fde = &root->dotdot;
|
||||
de = (struct ext4_dir_entry_2 *)((char *)fde +
|
||||
ext4_rec_len_from_disk(fde->rec_len));
|
||||
len = ((char *) root) + blocksize - (char *) de;
|
||||
memcpy (data1, de, len);
|
||||
de = (struct ext4_dir_entry_2 *) data1;
|
||||
top = data1 + len;
|
||||
|
|
|
@ -861,12 +861,13 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
|
|||
gdp = (struct ext4_group_desc *)((char *)primary->b_data +
|
||||
gdb_off * EXT4_DESC_SIZE(sb));
|
||||
|
||||
memset(gdp, 0, EXT4_DESC_SIZE(sb));
|
||||
ext4_block_bitmap_set(sb, gdp, input->block_bitmap); /* LV FIXME */
|
||||
ext4_inode_bitmap_set(sb, gdp, input->inode_bitmap); /* LV FIXME */
|
||||
ext4_inode_table_set(sb, gdp, input->inode_table); /* LV FIXME */
|
||||
ext4_free_blks_set(sb, gdp, input->free_blocks_count);
|
||||
ext4_free_inodes_set(sb, gdp, EXT4_INODES_PER_GROUP(sb));
|
||||
gdp->bg_flags |= cpu_to_le16(EXT4_BG_INODE_ZEROED);
|
||||
gdp->bg_flags = cpu_to_le16(EXT4_BG_INODE_ZEROED);
|
||||
gdp->bg_checksum = ext4_group_desc_csum(sbi, input->group, gdp);
|
||||
|
||||
/*
|
||||
|
|
|
@ -37,10 +37,10 @@
|
|||
#include <linux/proc_fs.h>
|
||||
#include <linux/debugfs.h>
|
||||
#include <linux/seq_file.h>
|
||||
#include <linux/math64.h>
|
||||
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/div64.h>
|
||||
|
||||
EXPORT_SYMBOL(jbd2_journal_start);
|
||||
EXPORT_SYMBOL(jbd2_journal_restart);
|
||||
|
@ -846,8 +846,8 @@ static int jbd2_seq_info_show(struct seq_file *seq, void *v)
|
|||
jiffies_to_msecs(s->stats->u.run.rs_flushing / s->stats->ts_tid));
|
||||
seq_printf(seq, " %ums logging transaction\n",
|
||||
jiffies_to_msecs(s->stats->u.run.rs_logging / s->stats->ts_tid));
|
||||
seq_printf(seq, " %luus average transaction commit time\n",
|
||||
do_div(s->journal->j_average_commit_time, 1000));
|
||||
seq_printf(seq, " %lluus average transaction commit time\n",
|
||||
div_u64(s->journal->j_average_commit_time, 1000));
|
||||
seq_printf(seq, " %lu handles per transaction\n",
|
||||
s->stats->u.run.rs_handle_count / s->stats->ts_tid);
|
||||
seq_printf(seq, " %lu blocks per transaction\n",
|
||||
|
|
|
@ -445,10 +445,9 @@
|
|||
* section in the linker script will go there too. @phdr should have
|
||||
* a leading colon.
|
||||
*
|
||||
* This macro defines three symbols, __per_cpu_load, __per_cpu_start
|
||||
* and __per_cpu_end. The first one is the vaddr of loaded percpu
|
||||
* init data. __per_cpu_start equals @vaddr and __per_cpu_end is the
|
||||
* end offset.
|
||||
* Note that this macros defines __per_cpu_load as an absolute symbol.
|
||||
* If there is no need to put the percpu section at a predetermined
|
||||
* address, use PERCPU().
|
||||
*/
|
||||
#define PERCPU_VADDR(vaddr, phdr) \
|
||||
VMLINUX_SYMBOL(__per_cpu_load) = .; \
|
||||
|
@ -470,7 +469,20 @@
|
|||
* Align to @align and outputs output section for percpu area. This
|
||||
* macro doesn't maniuplate @vaddr or @phdr and __per_cpu_load and
|
||||
* __per_cpu_start will be identical.
|
||||
*
|
||||
* This macro is equivalent to ALIGN(align); PERCPU_VADDR( , ) except
|
||||
* that __per_cpu_load is defined as a relative symbol against
|
||||
* .data.percpu which is required for relocatable x86_32
|
||||
* configuration.
|
||||
*/
|
||||
#define PERCPU(align) \
|
||||
. = ALIGN(align); \
|
||||
PERCPU_VADDR( , )
|
||||
.data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { \
|
||||
VMLINUX_SYMBOL(__per_cpu_load) = .; \
|
||||
VMLINUX_SYMBOL(__per_cpu_start) = .; \
|
||||
*(.data.percpu.first) \
|
||||
*(.data.percpu.page_aligned) \
|
||||
*(.data.percpu) \
|
||||
*(.data.percpu.shared_aligned) \
|
||||
VMLINUX_SYMBOL(__per_cpu_end) = .; \
|
||||
}
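For context, a hedged example of how an architecture linker script might consume these macros (not taken from this patch): architectures without a fixed percpu address use PERCPU() with just an alignment, while a predetermined address and its own program header go through PERCPU_VADDR() with the leading-colon phdr the comment describes.

SECTIONS
{
	. = ALIGN(PAGE_SIZE);
	PERCPU(PAGE_SIZE)		/* common case: percpu section at the current address */

	/* or, with a predetermined vaddr and a dedicated PHDR: */
	/* PERCPU_VADDR(0, :percpu) */
}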
|
||||
|
|
|
@ -41,6 +41,7 @@ header-y += baycom.h
|
|||
header-y += bfs_fs.h
|
||||
header-y += blkpg.h
|
||||
header-y += bpqether.h
|
||||
header-y += bsg.h
|
||||
header-y += can.h
|
||||
header-y += cdk.h
|
||||
header-y += chio.h
|
||||
|
|
|
@@ -144,7 +144,7 @@ struct bio {
* bit 1 -- rw-ahead when set
* bit 2 -- barrier
* Insert a serialization point in the IO queue, forcing previously
* submitted IO to be completed before this oen is issued.
* submitted IO to be completed before this one is issued.
* bit 3 -- synchronous I/O hint: the block layer will unplug immediately
* Note that this does NOT indicate that the IO itself is sync, just
* that the block layer will not postpone issue of this IO by plugging.

@@ -163,12 +163,33 @@ struct bio {
#define BIO_RW 0 /* Must match RW in req flags (blkdev.h) */
#define BIO_RW_AHEAD 1 /* Must match FAILFAST in req flags */
#define BIO_RW_BARRIER 2
#define BIO_RW_SYNC 3
#define BIO_RW_META 4
#define BIO_RW_DISCARD 5
#define BIO_RW_FAILFAST_DEV 6
#define BIO_RW_FAILFAST_TRANSPORT 7
#define BIO_RW_FAILFAST_DRIVER 8
#define BIO_RW_SYNCIO 3
#define BIO_RW_UNPLUG 4
#define BIO_RW_META 5
#define BIO_RW_DISCARD 6
#define BIO_RW_FAILFAST_DEV 7
#define BIO_RW_FAILFAST_TRANSPORT 8
#define BIO_RW_FAILFAST_DRIVER 9

#define BIO_RW_SYNC (BIO_RW_SYNCIO | BIO_RW_UNPLUG)

#define bio_rw_flagged(bio, flag) ((bio)->bi_rw & (1 << (flag)))

/*
* Old defines, these should eventually be replaced by direct usage of
* bio_rw_flagged()
*/
#define bio_barrier(bio) bio_rw_flagged(bio, BIO_RW_BARRIER)
#define bio_sync(bio) bio_rw_flagged(bio, BIO_RW_SYNCIO)
#define bio_unplug(bio) bio_rw_flagged(bio, BIO_RW_UNPLUG)
#define bio_failfast_dev(bio) bio_rw_flagged(bio, BIO_RW_FAILFAST_DEV)
#define bio_failfast_transport(bio) \
bio_rw_flagged(bio, BIO_RW_FAILFAST_TRANSPORT)
#define bio_failfast_driver(bio) \
bio_rw_flagged(bio, BIO_RW_FAILFAST_DRIVER)
#define bio_rw_ahead(bio) bio_rw_flagged(bio, BIO_RW_AHEAD)
#define bio_rw_meta(bio) bio_rw_flagged(bio, BIO_RW_META)
#define bio_discard(bio) bio_rw_flagged(bio, BIO_RW_DISCARD)

/*
* upper 16 bits of bi_rw define the io priority of this bio
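bio_rw_flagged() is a plain bit test on bi_rw, so the wrapper macros above are one-for-one replacements for the old open-coded tests. A standalone sketch of the same pattern, using a stand-in struct rather than the real struct bio (the flag values mirror the hunk above; everything else is illustrative):

	#include <stdio.h>

	enum { BIO_RW_BARRIER = 2, BIO_RW_SYNCIO = 3, BIO_RW_UNPLUG = 4 };

	struct fake_bio { unsigned long bi_rw; };	/* stand-in for struct bio */

	#define bio_rw_flagged(bio, flag) ((bio)->bi_rw & (1UL << (flag)))

	int main(void)
	{
		struct fake_bio b = { .bi_rw = (1UL << BIO_RW_SYNCIO) | (1UL << BIO_RW_UNPLUG) };

		/* the same tests bio_sync()/bio_unplug()/bio_barrier() expand to */
		printf("sync=%d unplug=%d barrier=%d\n",
		       !!bio_rw_flagged(&b, BIO_RW_SYNCIO),
		       !!bio_rw_flagged(&b, BIO_RW_UNPLUG),
		       !!bio_rw_flagged(&b, BIO_RW_BARRIER));
		return 0;
	}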
@@ -193,15 +214,6 @@ struct bio {
#define bio_offset(bio) bio_iovec((bio))->bv_offset
#define bio_segments(bio) ((bio)->bi_vcnt - (bio)->bi_idx)
#define bio_sectors(bio) ((bio)->bi_size >> 9)
#define bio_barrier(bio) ((bio)->bi_rw & (1 << BIO_RW_BARRIER))
#define bio_sync(bio) ((bio)->bi_rw & (1 << BIO_RW_SYNC))
#define bio_failfast_dev(bio) ((bio)->bi_rw & (1 << BIO_RW_FAILFAST_DEV))
#define bio_failfast_transport(bio) \
((bio)->bi_rw & (1 << BIO_RW_FAILFAST_TRANSPORT))
#define bio_failfast_driver(bio) ((bio)->bi_rw & (1 << BIO_RW_FAILFAST_DRIVER))
#define bio_rw_ahead(bio) ((bio)->bi_rw & (1 << BIO_RW_AHEAD))
#define bio_rw_meta(bio) ((bio)->bi_rw & (1 << BIO_RW_META))
#define bio_discard(bio) ((bio)->bi_rw & (1 << BIO_RW_DISCARD))
#define bio_empty_barrier(bio) (bio_barrier(bio) && !bio_has_data(bio) && !bio_discard(bio))

static inline unsigned int bio_cur_sectors(struct bio *bio)

@@ -312,7 +324,6 @@ struct bio_integrity_payload {
void *bip_buf; /* generated integrity data */
bio_end_io_t *bip_end_io; /* saved I/O completion fn */

int bip_error; /* saved I/O error */
unsigned int bip_size;

unsigned short bip_pool; /* pool the ivec came from */
@@ -108,6 +108,7 @@ enum rq_flag_bits {
__REQ_RW_META, /* metadata io request */
__REQ_COPY_USER, /* contains copies of user pages */
__REQ_INTEGRITY, /* integrity metadata has been remapped */
__REQ_UNPLUG, /* unplug queue on submission */
__REQ_NR_BITS, /* stops here */
};

@@ -134,6 +135,7 @@ enum rq_flag_bits {
#define REQ_RW_META (1 << __REQ_RW_META)
#define REQ_COPY_USER (1 << __REQ_COPY_USER)
#define REQ_INTEGRITY (1 << __REQ_INTEGRITY)
#define REQ_UNPLUG (1 << __REQ_UNPLUG)

#define BLK_MAX_CDB 16
@@ -449,6 +451,11 @@ struct request_queue
#define QUEUE_FLAG_STACKABLE 13 /* supports request stacking */
#define QUEUE_FLAG_NONROT 14 /* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT 15 /* do IO stats */

#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_CLUSTER) | \
(1 << QUEUE_FLAG_STACKABLE))

static inline int queue_is_locked(struct request_queue *q)
{
@@ -565,6 +572,7 @@ enum {
#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_flushing(q) ((q)->ordseq)
#define blk_queue_stackable(q) \
test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)

@@ -308,7 +308,8 @@ void buffer_assertion_failure(struct buffer_head *bh);
int val = (expr); \
if (!val) { \
printk(KERN_ERR \
"EXT3-fs unexpected failure: %s;\n",# expr); \
"JBD2 unexpected failure: %s: %s;\n", \
__func__, #expr); \
printk(KERN_ERR why "\n"); \
} \
val; \
@@ -182,7 +182,7 @@ static inline int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)
size = 2048;
if (nr_pcpus >= 32)
size = 4096;
if (sizeof(rwlock_t) != 0) {
if (sizeof(spinlock_t) != 0) {
#ifdef CONFIG_NUMA
if (size * sizeof(spinlock_t) > PAGE_SIZE)
hashinfo->ehash_locks = vmalloc(size * sizeof(spinlock_t));
@@ -658,6 +658,9 @@ again: remove_next = 1 + (end > next->vm_end);
validate_mm(mm);
}

/* Flags that can be inherited from an existing mapping when merging */
#define VM_MERGEABLE_FLAGS (VM_CAN_NONLINEAR)

/*
* If the vma has a ->close operation then the driver probably needs to release
* per-vma resources, so we don't attempt to merge those.

@@ -665,7 +668,7 @@ again: remove_next = 1 + (end > next->vm_end);
static inline int is_mergeable_vma(struct vm_area_struct *vma,
struct file *file, unsigned long vm_flags)
{
if (vma->vm_flags != vm_flags)
if ((vma->vm_flags ^ vm_flags) & ~VM_MERGEABLE_FLAGS)
return 0;
if (vma->vm_file != file)
return 0;
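The new test merges two vmas whose flags differ only in bits listed in VM_MERGEABLE_FLAGS: XOR exposes the differing bits and the mask throws away the ones that are allowed to differ. A standalone sketch of that check (the flag values here are invented for the demo; only the expression mirrors the hunk):

	#include <stdio.h>

	#define VM_READ            0x1UL
	#define VM_WRITE           0x2UL
	#define VM_CAN_NONLINEAR   0x100UL	/* value made up for this demo */
	#define VM_MERGEABLE_FLAGS (VM_CAN_NONLINEAR)

	/* non-zero if the flag sets differ only in mergeable bits */
	static int flags_mergeable(unsigned long a, unsigned long b)
	{
		return !((a ^ b) & ~VM_MERGEABLE_FLAGS);
	}

	int main(void)
	{
		printf("%d\n", flags_mergeable(VM_READ | VM_WRITE,
					       VM_READ | VM_WRITE | VM_CAN_NONLINEAR));	/* 1 */
		printf("%d\n", flags_mergeable(VM_READ, VM_READ | VM_WRITE));		/* 0 */
		return 0;
	}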
@@ -2212,10 +2212,10 @@ unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
return 0;

next_skb:
block_limit = skb_headlen(st->cur_skb);
block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;

if (abs_offset < block_limit) {
*data = st->cur_skb->data + abs_offset;
*data = st->cur_skb->data + (abs_offset - st->stepped_offset);
return block_limit - abs_offset;
}
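The fix keeps abs_offset as an offset into the whole byte stream while the data pointer is computed inside the current buffer, so the bytes already stepped over in earlier buffers must be added to the limit and subtracted from the pointer offset. A userspace sketch of the same bookkeeping over a flat array of segments (all names here are invented; only the arithmetic mirrors the hunk):

	#include <stddef.h>

	struct seg { const unsigned char *data; size_t len; };

	/* Return a pointer to byte abs_offset of the logical stream in segs[]. */
	static const unsigned char *seq_read(const struct seg *segs, size_t nsegs,
					     size_t abs_offset)
	{
		size_t stepped_offset = 0;

		for (size_t i = 0; i < nsegs; i++) {
			size_t block_limit = segs[i].len + stepped_offset;

			if (abs_offset < block_limit)
				return segs[i].data + (abs_offset - stepped_offset);
			stepped_offset += segs[i].len;
		}
		return NULL;	/* offset past the end of the stream */
	}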
@@ -2250,13 +2250,14 @@ next_skb:
st->frag_data = NULL;
}

if (st->cur_skb->next) {
st->cur_skb = st->cur_skb->next;
if (st->root_skb == st->cur_skb &&
skb_shinfo(st->root_skb)->frag_list) {
st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
st->frag_idx = 0;
goto next_skb;
} else if (st->root_skb == st->cur_skb &&
skb_shinfo(st->root_skb)->frag_list) {
st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
} else if (st->cur_skb->next) {
st->cur_skb = st->cur_skb->next;
st->frag_idx = 0;
goto next_skb;
}
@@ -1268,6 +1268,9 @@ __be32 __init root_nfs_parse_addr(char *name)
static int __init ip_auto_config(void)
{
__be32 addr;
#ifdef IPCONFIG_DYNAMIC
int retries = CONF_OPEN_RETRIES;
#endif

#ifdef CONFIG_PROC_FS
proc_net_fops_create(&init_net, "pnp", S_IRUGO, &pnp_seq_fops);

@@ -1304,9 +1307,6 @@ static int __init ip_auto_config(void)
#endif
ic_first_dev->next) {
#ifdef IPCONFIG_DYNAMIC

int retries = CONF_OPEN_RETRIES;

if (ic_dynamic() < 0) {
ic_close_devs();

@@ -524,7 +524,8 @@ static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
struct tcp_splice_state *tss = rd_desc->arg.data;
int ret;

ret = skb_splice_bits(skb, offset, tss->pipe, rd_desc->count, tss->flags);
ret = skb_splice_bits(skb, offset, tss->pipe, min(rd_desc->count, len),
tss->flags);
if (ret > 0)
rd_desc->count -= ret;
return ret;
@@ -120,8 +120,11 @@ EXPORT_SYMBOL(sysctl_udp_wmem_min);
atomic_t udp_memory_allocated;
EXPORT_SYMBOL(udp_memory_allocated);

#define PORTS_PER_CHAIN (65536 / UDP_HTABLE_SIZE)

static int udp_lib_lport_inuse(struct net *net, __u16 num,
const struct udp_hslot *hslot,
unsigned long *bitmap,
struct sock *sk,
int (*saddr_comp)(const struct sock *sk1,
const struct sock *sk2))

@@ -132,12 +135,17 @@ static int udp_lib_lport_inuse(struct net *net, __u16 num,
sk_nulls_for_each(sk2, node, &hslot->head)
if (net_eq(sock_net(sk2), net) &&
sk2 != sk &&
sk2->sk_hash == num &&
(bitmap || sk2->sk_hash == num) &&
(!sk2->sk_reuse || !sk->sk_reuse) &&
(!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if
|| sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
(*saddr_comp)(sk, sk2))
return 1;
(*saddr_comp)(sk, sk2)) {
if (bitmap)
__set_bit(sk2->sk_hash / UDP_HTABLE_SIZE,
bitmap);
else
return 1;
}
return 0;
}
@@ -160,32 +168,47 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
if (!snum) {
int low, high, remaining;
unsigned rand;
unsigned short first;
unsigned short first, last;
DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN);

inet_get_local_port_range(&low, &high);
remaining = (high - low) + 1;

rand = net_random();
snum = first = rand % remaining + low;
rand |= 1;
for (;;) {
hslot = &udptable->hash[udp_hashfn(net, snum)];
first = (((u64)rand * remaining) >> 32) + low;
/*
* force rand to be an odd multiple of UDP_HTABLE_SIZE
*/
rand = (rand | 1) * UDP_HTABLE_SIZE;
for (last = first + UDP_HTABLE_SIZE; first != last; first++) {
hslot = &udptable->hash[udp_hashfn(net, first)];
bitmap_zero(bitmap, PORTS_PER_CHAIN);
spin_lock_bh(&hslot->lock);
if (!udp_lib_lport_inuse(net, snum, hslot, sk, saddr_comp))
break;
spin_unlock_bh(&hslot->lock);
udp_lib_lport_inuse(net, snum, hslot, bitmap, sk,
saddr_comp);

snum = first;
/*
* Iterate on all possible values of snum for this hash.
* Using steps of an odd multiple of UDP_HTABLE_SIZE
* give us randomization and full range coverage.
*/
do {
snum = snum + rand;
} while (snum < low || snum > high);
if (snum == first)
goto fail;
if (low <= snum && snum <= high &&
!test_bit(snum / UDP_HTABLE_SIZE, bitmap))
goto found;
snum += rand;
} while (snum != first);
spin_unlock_bh(&hslot->lock);
}
goto fail;
} else {
hslot = &udptable->hash[udp_hashfn(net, snum)];
spin_lock_bh(&hslot->lock);
if (udp_lib_lport_inuse(net, snum, hslot, sk, saddr_comp))
if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk, saddr_comp))
goto fail_unlock;
}
found:
inet_sk(sk)->num = snum;
sk->sk_hash = snum;
if (sk_unhashed(sk)) {
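The comment in the hunk relies on a number-theoretic property: stepping a 16-bit port by an odd multiple of UDP_HTABLE_SIZE from a random start visits every port in the same hash chain exactly once before wrapping back to the start. A standalone sketch that checks this property (the seed and the harness are invented; only the constants mirror the hunk):

	#include <stdio.h>

	#define UDP_HTABLE_SIZE 128
	#define PORTS_PER_CHAIN (65536 / UDP_HTABLE_SIZE)

	static unsigned char seen[65536];

	int main(void)
	{
		unsigned seed = 12345;				/* stand-in for net_random() */
		unsigned short first = (unsigned short)seed;
		unsigned step = (seed | 1) * UDP_HTABLE_SIZE;	/* odd multiple of the table size */
		unsigned short snum = first;
		int visited = 0, same_chain = 1;

		do {
			seen[snum] = 1;
			visited++;
			snum += step;				/* wraps modulo 65536 */
		} while (snum != first);

		for (int p = 0; p < 65536; p++)
			if (seen[p] && p % UDP_HTABLE_SIZE != first % UDP_HTABLE_SIZE)
				same_chain = 0;

		printf("visited=%d (expect %d), same_chain=%d\n",
		       visited, PORTS_PER_CHAIN, same_chain);
		return 0;
	}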
@@ -4250,7 +4250,7 @@ static struct addrconf_sysctl_table
.procname = "mc_forwarding",
.data = &ipv6_devconf.mc_forwarding,
.maxlen = sizeof(int),
.mode = 0644,
.mode = 0444,
.proc_handler = proc_dointvec,
},
#endif

@@ -443,10 +443,10 @@ void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info,
if (xfrm_decode_session_reverse(skb, &fl2, AF_INET6))
goto relookup_failed;

if (ip6_dst_lookup(sk, &dst2, &fl))
if (ip6_dst_lookup(sk, &dst2, &fl2))
goto relookup_failed;

err = xfrm_lookup(net, &dst2, &fl, sk, XFRM_LOOKUP_ICMP);
err = xfrm_lookup(net, &dst2, &fl2, sk, XFRM_LOOKUP_ICMP);
switch (err) {
case 0:
dst_release(dst);
@@ -255,6 +255,7 @@ int ip6_mc_input(struct sk_buff *skb)
* IPv6 multicast router mode is now supported ;)
*/
if (dev_net(skb->dev)->ipv6.devconf_all->mc_forwarding &&
!(ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) &&
likely(!(IP6CB(skb)->flags & IP6SKB_FORWARDED))) {
/*
* Okay, we try to forward - split and duplicate

@@ -316,7 +317,6 @@ int ip6_mc_input(struct sk_buff *skb)
}

if (skb2) {
skb2->dev = skb2->dst->dev;
ip6_mr_input(skb2);
}
}
@@ -365,7 +365,9 @@ static int pim6_rcv(struct sk_buff *skb)
pim = (struct pimreghdr *)skb_transport_header(skb);
if (pim->type != ((PIM_VERSION << 4) | PIM_REGISTER) ||
(pim->flags & PIM_NULL_REGISTER) ||
(ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
(csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
sizeof(*pim), IPPROTO_PIM,
csum_partial((void *)pim, sizeof(*pim), 0)) &&
csum_fold(skb_checksum(skb, 0, skb->len, 0))))
goto drop;

@@ -392,7 +394,7 @@ static int pim6_rcv(struct sk_buff *skb)
skb_pull(skb, (u8 *)encap - skb->data);
skb_reset_network_header(skb);
skb->dev = reg_dev;
skb->protocol = htons(ETH_P_IP);
skb->protocol = htons(ETH_P_IPV6);
skb->ip_summed = 0;
skb->pkt_type = PACKET_HOST;
dst_release(skb->dst);

@@ -481,6 +483,7 @@ static int mif6_delete(struct net *net, int vifi)
{
struct mif_device *v;
struct net_device *dev;
struct inet6_dev *in6_dev;
if (vifi < 0 || vifi >= net->ipv6.maxvif)
return -EADDRNOTAVAIL;

@@ -513,6 +516,10 @@ static int mif6_delete(struct net *net, int vifi)

dev_set_allmulti(dev, -1);

in6_dev = __in6_dev_get(dev);
if (in6_dev)
in6_dev->cnf.mc_forwarding--;

if (v->flags & MIFF_REGISTER)
unregister_netdevice(dev);
@@ -622,6 +629,7 @@ static int mif6_add(struct net *net, struct mif6ctl *vifc, int mrtsock)
int vifi = vifc->mif6c_mifi;
struct mif_device *v = &net->ipv6.vif6_table[vifi];
struct net_device *dev;
struct inet6_dev *in6_dev;
int err;

/* Is vif busy ? */

@@ -662,6 +670,10 @@ static int mif6_add(struct net *net, struct mif6ctl *vifc, int mrtsock)
return -EINVAL;
}

in6_dev = __in6_dev_get(dev);
if (in6_dev)
in6_dev->cnf.mc_forwarding++;

/*
* Fill in the VIF structures
*/

@@ -838,8 +850,6 @@ static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt, mifi_t mifi,

skb->dst = dst_clone(pkt->dst);
skb->ip_summed = CHECKSUM_UNNECESSARY;

skb_pull(skb, sizeof(struct ipv6hdr));
}

if (net->ipv6.mroute6_sk == NULL) {
@@ -1222,8 +1232,10 @@ static int ip6mr_sk_init(struct sock *sk)

rtnl_lock();
write_lock_bh(&mrt_lock);
if (likely(net->ipv6.mroute6_sk == NULL))
if (likely(net->ipv6.mroute6_sk == NULL)) {
net->ipv6.mroute6_sk = sk;
net->ipv6.devconf_all->mc_forwarding++;
}
else
err = -EADDRINUSE;
write_unlock_bh(&mrt_lock);

@@ -1242,6 +1254,7 @@ int ip6mr_sk_done(struct sock *sk)
if (sk == net->ipv6.mroute6_sk) {
write_lock_bh(&mrt_lock);
net->ipv6.mroute6_sk = NULL;
net->ipv6.devconf_all->mc_forwarding--;
write_unlock_bh(&mrt_lock);

mroute_clean_tables(net);

@@ -794,7 +794,7 @@ void ip6_route_input(struct sk_buff *skb)
.proto = iph->nexthdr,
};

if (rt6_need_strict(&iph->daddr))
if (rt6_need_strict(&iph->daddr) && skb->dev->type != ARPHRD_PIMREG)
flags |= RT6_LOOKUP_F_IFACE;

skb->dst = fib6_rule_lookup(net, &fl, flags, ip6_pol_route_input);
@@ -28,17 +28,6 @@
#include "debug-levels.h"


/* Debug framework control of debug levels */
struct d_level D_LEVEL[] = {
D_SUBMODULE_DEFINE(debugfs),
D_SUBMODULE_DEFINE(id_table),
D_SUBMODULE_DEFINE(op_msg),
D_SUBMODULE_DEFINE(op_reset),
D_SUBMODULE_DEFINE(op_rfkill),
D_SUBMODULE_DEFINE(stack),
};
size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);

#define __debugfs_register(prefix, name, parent) \
do { \
result = d_level_register_debugfs(prefix, name, parent); \

@@ -516,6 +516,19 @@ void wimax_dev_rm(struct wimax_dev *wimax_dev)
}
EXPORT_SYMBOL_GPL(wimax_dev_rm);


/* Debug framework control of debug levels */
struct d_level D_LEVEL[] = {
D_SUBMODULE_DEFINE(debugfs),
D_SUBMODULE_DEFINE(id_table),
D_SUBMODULE_DEFINE(op_msg),
D_SUBMODULE_DEFINE(op_reset),
D_SUBMODULE_DEFINE(op_rfkill),
D_SUBMODULE_DEFINE(stack),
};
size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);


struct genl_family wimax_gnl_family = {
.id = GENL_ID_GENERATE,
.name = "WiMAX",
@@ -498,6 +498,7 @@ static struct ieee80211_regdomain *country_ie_2_rd(
* calculate the number of reg rules we will need. We will need one
* for each channel subband */
while (country_ie_len >= 3) {
int end_channel = 0;
struct ieee80211_country_ie_triplet *triplet =
(struct ieee80211_country_ie_triplet *) country_ie;
int cur_sub_max_channel = 0, cur_channel = 0;

@@ -509,9 +510,25 @@ static struct ieee80211_regdomain *country_ie_2_rd(
continue;
}

/* 2 GHz */
if (triplet->chans.first_channel <= 14)
end_channel = triplet->chans.first_channel +
triplet->chans.num_channels;
else
/*
* 5 GHz -- For example in country IEs if the first
* channel given is 36 and the number of channels is 4
* then the individual channel numbers defined for the
* 5 GHz PHY by these parameters are: 36, 40, 44, and 48
* and not 36, 37, 38, 39.
*
* See: http://tinyurl.com/11d-clarification
*/
end_channel = triplet->chans.first_channel +
(4 * (triplet->chans.num_channels - 1));

cur_channel = triplet->chans.first_channel;
cur_sub_max_channel = ieee80211_channel_to_frequency(
cur_channel + triplet->chans.num_channels);
cur_sub_max_channel = end_channel;

/* Basic sanity check */
if (cur_sub_max_channel < cur_channel)
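On 5 GHz the channel numbers in a country IE triplet are spaced four apart, so a triplet of (first_channel = 36, num_channels = 4) covers channels 36, 40, 44 and 48 rather than 36..39. A tiny standalone sketch of the end-channel arithmetic used above (the wrapper function is invented; the formula mirrors the hunk):

	#include <stdio.h>

	static int end_channel(int first_channel, int num_channels)
	{
		if (first_channel <= 14)			/* 2 GHz: contiguous channels */
			return first_channel + num_channels;
		return first_channel + 4 * (num_channels - 1);	/* 5 GHz: spaced 4 apart */
	}

	int main(void)
	{
		printf("2 GHz: first=1  n=13 -> end=%d\n", end_channel(1, 13));	/* 14 */
		printf("5 GHz: first=36 n=4  -> end=%d\n", end_channel(36, 4));	/* 48 */
		return 0;
	}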
@@ -590,15 +607,6 @@ static struct ieee80211_regdomain *country_ie_2_rd(
end_channel = triplet->chans.first_channel +
triplet->chans.num_channels;
else
/*
* 5 GHz -- For example in country IEs if the first
* channel given is 36 and the number of channels is 4
* then the individual channel numbers defined for the
* 5 GHz PHY by these parameters are: 36, 40, 44, and 48
* and not 36, 37, 38, 39.
*
* See: http://tinyurl.com/11d-clarification
*/
end_channel = triplet->chans.first_channel +
(4 * (triplet->chans.num_channels - 1));
|
|||
if (intersected_rd) {
|
||||
printk(KERN_DEBUG "cfg80211: We intersect both of these "
|
||||
"and get:\n");
|
||||
print_regdomain_info(rd);
|
||||
print_regdomain_info(intersected_rd);
|
||||
return;
|
||||
}
|
||||
printk(KERN_DEBUG "cfg80211: Intersection between both failed\n");
|
||||
|
|