Merge branch 'rmobile-fixes-for-linus' of git://github.com/pmundt/linux-sh into renesas/soc
Conflicts:
	arch/arm/mach-shmobile/board-ap4evb.c

This moves the addition of init_consistent_dma_size() from the board files
into the common sh7372_map_io() function, where all the other contents of
the board-specific map_io calls have gone.

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
commit 426f1af947
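For reference, the consolidated helper (taken verbatim from the sh7372 setup-code hunk further down; board files now point their .map_io at it directly instead of defining their own wrapper) ends up as:

void __init sh7372_map_io(void)
{
	iotable_init(sh7372_io_desc, ARRAY_SIZE(sh7372_io_desc));

	/*
	 * DMA memory at 0xff200000 - 0xffdfffff. The default 2MB size isn't
	 * enough to allocate the frame buffer memory.
	 */
	init_consistent_dma_size(12 << 20);
}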
@@ -2845,6 +2845,12 @@ S:	Maintained
 F:	drivers/media/video/m5mols/
 F:	include/media/m5mols.h
 
+FUJITSU TABLET EXTRAS
+M:	Robert Gerlach <khnz@gmx.de>
+L:	platform-driver-x86@vger.kernel.org
+S:	Maintained
+F:	drivers/platform/x86/fujitsu-tablet.c
+
 FUSE: FILESYSTEM IN USERSPACE
 M:	Miklos Szeredi <miklos@szeredi.hu>
 L:	fuse-devel@lists.sourceforge.net
@@ -1188,6 +1188,7 @@ static struct i2c_board_info i2c1_devices[] = {
	},
};

#define GPIO_PORT9CR	0xE6051009
#define GPIO_PORT10CR	0xE605100A
#define USCCR1		0xE6058144
@@ -1335,15 +1335,6 @@ static struct i2c_board_info i2c1_devices[] = {
 	},
 };
 
-static void __init mackerel_map_io(void)
-{
-	sh7372_map_io();
-	/* DMA memory at 0xff200000 - 0xffdfffff. The default 2MB size isn't
-	 * enough to allocate the frame buffer memory.
-	 */
-	init_consistent_dma_size(12 << 20);
-}
-
 #define GPIO_PORT9CR	0xE6051009
 #define GPIO_PORT10CR	0xE605100A
 #define GPIO_PORT167CR	0xE60520A7
@@ -1563,7 +1554,7 @@ static void __init mackerel_init(void)
 }
 
 MACHINE_START(MACKEREL, "mackerel")
-	.map_io		= mackerel_map_io,
+	.map_io		= sh7372_map_io,
 	.init_early	= sh7372_add_early_devices,
 	.init_irq	= sh7372_init_irq,
 	.handle_irq	= shmobile_handle_irq_intc,
@@ -31,6 +31,7 @@
 #include <linux/sh_intc.h>
 #include <linux/sh_timer.h>
 #include <linux/pm_domain.h>
+#include <linux/dma-mapping.h>
 #include <mach/hardware.h>
 #include <mach/sh7372.h>
 #include <mach/common.h>
@@ -54,6 +55,12 @@ static struct map_desc sh7372_io_desc[] __initdata = {
 void __init sh7372_map_io(void)
 {
 	iotable_init(sh7372_io_desc, ARRAY_SIZE(sh7372_io_desc));
+
+	/*
+	 * DMA memory at 0xff200000 - 0xffdfffff. The default 2MB size isn't
+	 * enough to allocate the frame buffer memory.
+	 */
+	init_consistent_dma_size(12 << 20);
 }
 
 /* SCIFA0 */
@@ -66,6 +66,7 @@ vector = 0
	.long	exception_entry0 + vector * 6
	vector = vector + 1
	.endr
	vector = 0
	.rept	256
	.long	exception_entry1 + vector * 6
	vector = vector + 1
@@ -79,7 +79,7 @@ struct clk div4_clks[DIV4_NR] = {
 #define MSTPCR1		0xffc80034
 #define MSTPCR2		0xffc10028
 
-enum { MSTP004, MSTP000, MSTP114, MSTP113, MSTP112,
+enum { MSTP004, MSTP000, MSTP127, MSTP114, MSTP113, MSTP112,
        MSTP111, MSTP110, MSTP103, MSTP102, MSTP220,
        MSTP_NR };
@@ -89,6 +89,7 @@ static struct clk mstp_clks[MSTP_NR] = {
 	[MSTP000] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 0, 0),
 
 	/* MSTPCR1 */
+	[MSTP127] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 27, 0),
 	[MSTP114] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 14, 0),
 	[MSTP113] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 13, 0),
 	[MSTP112] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 12, 0),
@@ -131,6 +132,7 @@ static struct clk_lookup lookups[] = {
 	CLKDEV_CON_ID("usb_fck", &mstp_clks[MSTP103]),
 	CLKDEV_DEV_ID("renesas_usbhs.0", &mstp_clks[MSTP102]),
 	CLKDEV_CON_ID("mmc0", &mstp_clks[MSTP220]),
+	CLKDEV_CON_ID("rspi2", &mstp_clks[MSTP127]),
 };
 
 int __init arch_clk_init(void)
@@ -156,7 +156,7 @@ static struct clk_lookup lookups[] = {
 	CLKDEV_CON_ID("siof_fck", &mstp_clks[MSTP003]),
 	CLKDEV_CON_ID("hspi_fck", &mstp_clks[MSTP002]),
 	CLKDEV_CON_ID("hudi_fck", &mstp_clks[MSTP119]),
-	CLKDEV_CON_ID("ubc_fck", &mstp_clks[MSTP117]),
+	CLKDEV_CON_ID("ubc0", &mstp_clks[MSTP117]),
 	CLKDEV_CON_ID("dmac_11_6_fck", &mstp_clks[MSTP105]),
 	CLKDEV_CON_ID("dmac_5_0_fck", &mstp_clks[MSTP104]),
 	CLKDEV_CON_ID("gdta_fck", &mstp_clks[MSTP100]),
@@ -31,7 +31,7 @@ UTS_MACHINE    := sparc
 
 #KBUILD_CFLAGS += -g -pipe -fcall-used-g5 -fcall-used-g7
 KBUILD_CFLAGS += -m32 -pipe -mno-fpu -fcall-used-g5 -fcall-used-g7
-KBUILD_AFLAGS += -m32
+KBUILD_AFLAGS += -m32 -Wa,-Av8
 
 #LDFLAGS_vmlinux = -N -Ttext 0xf0004000
 # Since 2.5.40, the first stage is left not btfix-ed.
(Two file diffs suppressed because they are too large.)
@@ -303,10 +303,7 @@ long compat_sys_rt_sigreturn(struct pt_regs *regs)
 		goto badframe;
 
 	sigdelsetmask(&set, ~_BLOCKABLE);
-	spin_lock_irq(&current->sighand->siglock);
-	current->blocked = set;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
+	set_current_blocked(&set);
 
 	if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
 		goto badframe;
@@ -97,10 +97,7 @@ SYSCALL_DEFINE1(rt_sigreturn, struct pt_regs *, regs)
 		goto badframe;
 
 	sigdelsetmask(&set, ~_BLOCKABLE);
-	spin_lock_irq(&current->sighand->siglock);
-	current->blocked = set;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
+	set_current_blocked(&set);
 
 	if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
 		goto badframe;
@@ -286,13 +283,7 @@ static int handle_signal(unsigned long sig, siginfo_t *info,
 		 * the work_pending path in the return-to-user code, and
 		 * either way we can re-enable interrupts unconditionally.
 		 */
-		spin_lock_irq(&current->sighand->siglock);
-		sigorsets(&current->blocked,
-			  &current->blocked, &ka->sa.sa_mask);
-		if (!(ka->sa.sa_flags & SA_NODEFER))
-			sigaddset(&current->blocked, sig);
-		recalc_sigpending();
-		spin_unlock_irq(&current->sighand->siglock);
+		block_sigmask(ka, sig);
 	}
 
 	return ret;
@@ -163,7 +163,7 @@ static int __init create_sysfs_entries(void)
 
 #define create_hv_attr(name)						\
 	if (!err)							\
-		err = sysfs_create_file(hypervisor_kobj, &dev_attr_##name);
+		err = sysfs_create_file(hypervisor_kobj, &dev_attr_##name.attr);
 	create_hv_attr(type);
 	create_hv_attr(version);
 	create_hv_attr(config_version);
@@ -144,7 +144,7 @@ void arch_read_unlock(arch_rwlock_t *rwlock)
 	for (;;) {
 		__insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 1);
 		val = __insn_tns((int *)&rwlock->lock);
-		if (likely(val & 1) == 0) {
+		if (likely((val & 1) == 0)) {
 			rwlock->lock = val - (1 << _RD_COUNT_SHIFT);
 			__insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0);
 			break;
@@ -385,14 +385,15 @@ static __initconst const u64 westmere_hw_cache_event_ids
 #define NHM_LOCAL_DRAM		(1 << 14)
 #define NHM_NON_DRAM		(1 << 15)
 
-#define NHM_ALL_DRAM		(NHM_REMOTE_DRAM|NHM_LOCAL_DRAM)
+#define NHM_LOCAL		(NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD)
+#define NHM_REMOTE		(NHM_REMOTE_DRAM)
 
 #define NHM_DMND_READ		(NHM_DMND_DATA_RD)
 #define NHM_DMND_WRITE		(NHM_DMND_RFO|NHM_DMND_WB)
 #define NHM_DMND_PREFETCH	(NHM_PF_DATA_RD|NHM_PF_DATA_RFO)
 
 #define NHM_L3_HIT	(NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
-#define NHM_L3_MISS	(NHM_NON_DRAM|NHM_ALL_DRAM|NHM_REMOTE_CACHE_FWD)
+#define NHM_L3_MISS	(NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD)
 #define NHM_L3_ACCESS	(NHM_L3_HIT|NHM_L3_MISS)
 
 static __initconst const u64 nehalem_hw_cache_extra_regs
@@ -416,16 +417,16 @@ static __initconst const u64 nehalem_hw_cache_extra_regs
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
-		[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_ALL_DRAM,
-		[ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_REMOTE_DRAM,
+		[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_LOCAL|NHM_REMOTE,
+		[ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_REMOTE,
	},
	[ C(OP_WRITE) ] = {
-		[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_ALL_DRAM,
-		[ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_REMOTE_DRAM,
+		[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_LOCAL|NHM_REMOTE,
+		[ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_REMOTE,
	},
	[ C(OP_PREFETCH) ] = {
-		[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_ALL_DRAM,
-		[ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_REMOTE_DRAM,
+		[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_LOCAL|NHM_REMOTE,
+		[ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_REMOTE,
	},
 },
 };
block/blk-ioc.c (133 changed lines)
@ -36,10 +36,22 @@ static void icq_free_icq_rcu(struct rcu_head *head)
|
|||
kmem_cache_free(icq->__rcu_icq_cache, icq);
|
||||
}
|
||||
|
||||
/*
|
||||
* Exit and free an icq. Called with both ioc and q locked.
|
||||
*/
|
||||
/* Exit an icq. Called with both ioc and q locked. */
|
||||
static void ioc_exit_icq(struct io_cq *icq)
|
||||
{
|
||||
struct elevator_type *et = icq->q->elevator->type;
|
||||
|
||||
if (icq->flags & ICQ_EXITED)
|
||||
return;
|
||||
|
||||
if (et->ops.elevator_exit_icq_fn)
|
||||
et->ops.elevator_exit_icq_fn(icq);
|
||||
|
||||
icq->flags |= ICQ_EXITED;
|
||||
}
|
||||
|
||||
/* Release an icq. Called with both ioc and q locked. */
|
||||
static void ioc_destroy_icq(struct io_cq *icq)
|
||||
{
|
||||
struct io_context *ioc = icq->ioc;
|
||||
struct request_queue *q = icq->q;
|
||||
|
@ -60,8 +72,7 @@ static void ioc_exit_icq(struct io_cq *icq)
|
|||
if (rcu_dereference_raw(ioc->icq_hint) == icq)
|
||||
rcu_assign_pointer(ioc->icq_hint, NULL);
|
||||
|
||||
if (et->ops.elevator_exit_icq_fn)
|
||||
et->ops.elevator_exit_icq_fn(icq);
|
||||
ioc_exit_icq(icq);
|
||||
|
||||
/*
|
||||
* @icq->q might have gone away by the time RCU callback runs
|
||||
|
@ -79,7 +90,6 @@ static void ioc_release_fn(struct work_struct *work)
|
|||
{
|
||||
struct io_context *ioc = container_of(work, struct io_context,
|
||||
release_work);
|
||||
struct request_queue *last_q = NULL;
|
||||
unsigned long flags;
|
||||
|
||||
/*
|
||||
|
@ -93,44 +103,19 @@ static void ioc_release_fn(struct work_struct *work)
|
|||
while (!hlist_empty(&ioc->icq_list)) {
|
||||
struct io_cq *icq = hlist_entry(ioc->icq_list.first,
|
||||
struct io_cq, ioc_node);
|
||||
struct request_queue *this_q = icq->q;
|
||||
struct request_queue *q = icq->q;
|
||||
|
||||
if (this_q != last_q) {
|
||||
/*
|
||||
* Need to switch to @this_q. Once we release
|
||||
* @ioc->lock, it can go away along with @cic.
|
||||
* Hold on to it.
|
||||
*/
|
||||
__blk_get_queue(this_q);
|
||||
|
||||
/*
|
||||
* blk_put_queue() might sleep thanks to kobject
|
||||
* idiocy. Always release both locks, put and
|
||||
* restart.
|
||||
*/
|
||||
if (last_q) {
|
||||
spin_unlock(last_q->queue_lock);
|
||||
spin_unlock_irqrestore(&ioc->lock, flags);
|
||||
blk_put_queue(last_q);
|
||||
} else {
|
||||
spin_unlock_irqrestore(&ioc->lock, flags);
|
||||
}
|
||||
|
||||
last_q = this_q;
|
||||
spin_lock_irqsave(this_q->queue_lock, flags);
|
||||
spin_lock_nested(&ioc->lock, 1);
|
||||
continue;
|
||||
if (spin_trylock(q->queue_lock)) {
|
||||
ioc_destroy_icq(icq);
|
||||
spin_unlock(q->queue_lock);
|
||||
} else {
|
||||
spin_unlock_irqrestore(&ioc->lock, flags);
|
||||
cpu_relax();
|
||||
spin_lock_irqsave_nested(&ioc->lock, flags, 1);
|
||||
}
|
||||
ioc_exit_icq(icq);
|
||||
}
|
||||
|
||||
if (last_q) {
|
||||
spin_unlock(last_q->queue_lock);
|
||||
spin_unlock_irqrestore(&ioc->lock, flags);
|
||||
blk_put_queue(last_q);
|
||||
} else {
|
||||
spin_unlock_irqrestore(&ioc->lock, flags);
|
||||
}
|
||||
spin_unlock_irqrestore(&ioc->lock, flags);
|
||||
|
||||
kmem_cache_free(iocontext_cachep, ioc);
|
||||
}
|
||||
|
@ -145,6 +130,7 @@ static void ioc_release_fn(struct work_struct *work)
|
|||
void put_io_context(struct io_context *ioc)
|
||||
{
|
||||
unsigned long flags;
|
||||
bool free_ioc = false;
|
||||
|
||||
if (ioc == NULL)
|
||||
return;
|
||||
|
@ -159,8 +145,13 @@ void put_io_context(struct io_context *ioc)
|
|||
spin_lock_irqsave(&ioc->lock, flags);
|
||||
if (!hlist_empty(&ioc->icq_list))
|
||||
schedule_work(&ioc->release_work);
|
||||
else
|
||||
free_ioc = true;
|
||||
spin_unlock_irqrestore(&ioc->lock, flags);
|
||||
}
|
||||
|
||||
if (free_ioc)
|
||||
kmem_cache_free(iocontext_cachep, ioc);
|
||||
}
|
||||
EXPORT_SYMBOL(put_io_context);
|
||||
|
||||
|
@ -168,13 +159,41 @@ EXPORT_SYMBOL(put_io_context);
|
|||
void exit_io_context(struct task_struct *task)
|
||||
{
|
||||
struct io_context *ioc;
|
||||
struct io_cq *icq;
|
||||
struct hlist_node *n;
|
||||
unsigned long flags;
|
||||
|
||||
task_lock(task);
|
||||
ioc = task->io_context;
|
||||
task->io_context = NULL;
|
||||
task_unlock(task);
|
||||
|
||||
atomic_dec(&ioc->nr_tasks);
|
||||
if (!atomic_dec_and_test(&ioc->nr_tasks)) {
|
||||
put_io_context(ioc);
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* Need ioc lock to walk icq_list and q lock to exit icq. Perform
|
||||
* reverse double locking. Read comment in ioc_release_fn() for
|
||||
* explanation on the nested locking annotation.
|
||||
*/
|
||||
retry:
|
||||
spin_lock_irqsave_nested(&ioc->lock, flags, 1);
|
||||
hlist_for_each_entry(icq, n, &ioc->icq_list, ioc_node) {
|
||||
if (icq->flags & ICQ_EXITED)
|
||||
continue;
|
||||
if (spin_trylock(icq->q->queue_lock)) {
|
||||
ioc_exit_icq(icq);
|
||||
spin_unlock(icq->q->queue_lock);
|
||||
} else {
|
||||
spin_unlock_irqrestore(&ioc->lock, flags);
|
||||
cpu_relax();
|
||||
goto retry;
|
||||
}
|
||||
}
|
||||
spin_unlock_irqrestore(&ioc->lock, flags);
|
||||
|
||||
put_io_context(ioc);
|
||||
}
|
||||
|
||||
|
@ -194,7 +213,7 @@ void ioc_clear_queue(struct request_queue *q)
|
|||
struct io_context *ioc = icq->ioc;
|
||||
|
||||
spin_lock(&ioc->lock);
|
||||
ioc_exit_icq(icq);
|
||||
ioc_destroy_icq(icq);
|
||||
spin_unlock(&ioc->lock);
|
||||
}
|
||||
}
|
||||
|
@ -363,13 +382,13 @@ struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask)
|
|||
return icq;
|
||||
}
|
||||
|
||||
void ioc_set_changed(struct io_context *ioc, int which)
|
||||
void ioc_set_icq_flags(struct io_context *ioc, unsigned int flags)
|
||||
{
|
||||
struct io_cq *icq;
|
||||
struct hlist_node *n;
|
||||
|
||||
hlist_for_each_entry(icq, n, &ioc->icq_list, ioc_node)
|
||||
set_bit(which, &icq->changed);
|
||||
icq->flags |= flags;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -387,7 +406,7 @@ void ioc_ioprio_changed(struct io_context *ioc, int ioprio)
|
|||
|
||||
spin_lock_irqsave(&ioc->lock, flags);
|
||||
ioc->ioprio = ioprio;
|
||||
ioc_set_changed(ioc, ICQ_IOPRIO_CHANGED);
|
||||
ioc_set_icq_flags(ioc, ICQ_IOPRIO_CHANGED);
|
||||
spin_unlock_irqrestore(&ioc->lock, flags);
|
||||
}
|
||||
|
||||
|
@ -404,11 +423,33 @@ void ioc_cgroup_changed(struct io_context *ioc)
|
|||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&ioc->lock, flags);
|
||||
ioc_set_changed(ioc, ICQ_CGROUP_CHANGED);
|
||||
ioc_set_icq_flags(ioc, ICQ_CGROUP_CHANGED);
|
||||
spin_unlock_irqrestore(&ioc->lock, flags);
|
||||
}
|
||||
EXPORT_SYMBOL(ioc_cgroup_changed);
|
||||
|
||||
/**
|
||||
* icq_get_changed - fetch and clear icq changed mask
|
||||
* @icq: icq of interest
|
||||
*
|
||||
* Fetch and clear ICQ_*_CHANGED bits from @icq. Grabs and releases
|
||||
* @icq->ioc->lock.
|
||||
*/
|
||||
unsigned icq_get_changed(struct io_cq *icq)
|
||||
{
|
||||
unsigned int changed = 0;
|
||||
unsigned long flags;
|
||||
|
||||
if (unlikely(icq->flags & ICQ_CHANGED_MASK)) {
|
||||
spin_lock_irqsave(&icq->ioc->lock, flags);
|
||||
changed = icq->flags & ICQ_CHANGED_MASK;
|
||||
icq->flags &= ~ICQ_CHANGED_MASK;
|
||||
spin_unlock_irqrestore(&icq->ioc->lock, flags);
|
||||
}
|
||||
return changed;
|
||||
}
|
||||
EXPORT_SYMBOL(icq_get_changed);
|
||||
|
||||
static int __init blk_ioc_init(void)
|
||||
{
|
||||
iocontext_cachep = kmem_cache_create("blkdev_ioc",
|
||||
|
|
|
@ -3470,20 +3470,20 @@ cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
|
|||
const int rw = rq_data_dir(rq);
|
||||
const bool is_sync = rq_is_sync(rq);
|
||||
struct cfq_queue *cfqq;
|
||||
unsigned int changed;
|
||||
|
||||
might_sleep_if(gfp_mask & __GFP_WAIT);
|
||||
|
||||
spin_lock_irq(q->queue_lock);
|
||||
|
||||
/* handle changed notifications */
|
||||
if (unlikely(cic->icq.changed)) {
|
||||
if (test_and_clear_bit(ICQ_IOPRIO_CHANGED, &cic->icq.changed))
|
||||
changed_ioprio(cic);
|
||||
changed = icq_get_changed(&cic->icq);
|
||||
if (unlikely(changed & ICQ_IOPRIO_CHANGED))
|
||||
changed_ioprio(cic);
|
||||
#ifdef CONFIG_CFQ_GROUP_IOSCHED
|
||||
if (test_and_clear_bit(ICQ_CGROUP_CHANGED, &cic->icq.changed))
|
||||
changed_cgroup(cic);
|
||||
if (unlikely(changed & ICQ_CGROUP_CHANGED))
|
||||
changed_cgroup(cic);
|
||||
#endif
|
||||
}
|
||||
|
||||
new_queue:
|
||||
cfqq = cic_to_cfqq(cic, is_sync);
|
||||
|
|
|
@ -35,6 +35,7 @@ static DEFINE_IDR(ext_devt_idr);
|
|||
|
||||
static struct device_type disk_type;
|
||||
|
||||
static void disk_alloc_events(struct gendisk *disk);
|
||||
static void disk_add_events(struct gendisk *disk);
|
||||
static void disk_del_events(struct gendisk *disk);
|
||||
static void disk_release_events(struct gendisk *disk);
|
||||
|
@ -601,6 +602,8 @@ void add_disk(struct gendisk *disk)
|
|||
disk->major = MAJOR(devt);
|
||||
disk->first_minor = MINOR(devt);
|
||||
|
||||
disk_alloc_events(disk);
|
||||
|
||||
/* Register BDI before referencing it from bdev */
|
||||
bdi = &disk->queue->backing_dev_info;
|
||||
bdi_register_dev(bdi, disk_devt(disk));
|
||||
|
@ -1475,9 +1478,9 @@ static void __disk_unblock_events(struct gendisk *disk, bool check_now)
|
|||
intv = disk_events_poll_jiffies(disk);
|
||||
set_timer_slack(&ev->dwork.timer, intv / 4);
|
||||
if (check_now)
|
||||
queue_delayed_work(system_nrt_wq, &ev->dwork, 0);
|
||||
queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
|
||||
else if (intv)
|
||||
queue_delayed_work(system_nrt_wq, &ev->dwork, intv);
|
||||
queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, intv);
|
||||
out_unlock:
|
||||
spin_unlock_irqrestore(&ev->lock, flags);
|
||||
}
|
||||
|
@ -1521,7 +1524,7 @@ void disk_flush_events(struct gendisk *disk, unsigned int mask)
|
|||
ev->clearing |= mask;
|
||||
if (!ev->block) {
|
||||
cancel_delayed_work(&ev->dwork);
|
||||
queue_delayed_work(system_nrt_wq, &ev->dwork, 0);
|
||||
queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
|
||||
}
|
||||
spin_unlock_irq(&ev->lock);
|
||||
}
|
||||
|
@ -1558,7 +1561,7 @@ unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask)
|
|||
|
||||
/* uncondtionally schedule event check and wait for it to finish */
|
||||
disk_block_events(disk);
|
||||
queue_delayed_work(system_nrt_wq, &ev->dwork, 0);
|
||||
queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
|
||||
flush_delayed_work(&ev->dwork);
|
||||
__disk_unblock_events(disk, false);
|
||||
|
||||
|
@ -1595,7 +1598,7 @@ static void disk_events_workfn(struct work_struct *work)
|
|||
|
||||
intv = disk_events_poll_jiffies(disk);
|
||||
if (!ev->block && intv)
|
||||
queue_delayed_work(system_nrt_wq, &ev->dwork, intv);
|
||||
queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, intv);
|
||||
|
||||
spin_unlock_irq(&ev->lock);
|
||||
|
||||
|
@ -1733,9 +1736,9 @@ module_param_cb(events_dfl_poll_msecs, &disk_events_dfl_poll_msecs_param_ops,
|
|||
&disk_events_dfl_poll_msecs, 0644);
|
||||
|
||||
/*
|
||||
* disk_{add|del|release}_events - initialize and destroy disk_events.
|
||||
* disk_{alloc|add|del|release}_events - initialize and destroy disk_events.
|
||||
*/
|
||||
static void disk_add_events(struct gendisk *disk)
|
||||
static void disk_alloc_events(struct gendisk *disk)
|
||||
{
|
||||
struct disk_events *ev;
|
||||
|
||||
|
@ -1748,16 +1751,6 @@ static void disk_add_events(struct gendisk *disk)
|
|||
return;
|
||||
}
|
||||
|
||||
if (sysfs_create_files(&disk_to_dev(disk)->kobj,
|
||||
disk_events_attrs) < 0) {
|
||||
pr_warn("%s: failed to create sysfs files for events\n",
|
||||
disk->disk_name);
|
||||
kfree(ev);
|
||||
return;
|
||||
}
|
||||
|
||||
disk->ev = ev;
|
||||
|
||||
INIT_LIST_HEAD(&ev->node);
|
||||
ev->disk = disk;
|
||||
spin_lock_init(&ev->lock);
|
||||
|
@ -1766,8 +1759,21 @@ static void disk_add_events(struct gendisk *disk)
|
|||
ev->poll_msecs = -1;
|
||||
INIT_DELAYED_WORK(&ev->dwork, disk_events_workfn);
|
||||
|
||||
disk->ev = ev;
|
||||
}
|
||||
|
||||
static void disk_add_events(struct gendisk *disk)
|
||||
{
|
||||
if (!disk->ev)
|
||||
return;
|
||||
|
||||
/* FIXME: error handling */
|
||||
if (sysfs_create_files(&disk_to_dev(disk)->kobj, disk_events_attrs) < 0)
|
||||
pr_warn("%s: failed to create sysfs files for events\n",
|
||||
disk->disk_name);
|
||||
|
||||
mutex_lock(&disk_events_mutex);
|
||||
list_add_tail(&ev->node, &disk_events);
|
||||
list_add_tail(&disk->ev->node, &disk_events);
|
||||
mutex_unlock(&disk_events_mutex);
|
||||
|
||||
/*
|
||||
|
|
|
@ -389,17 +389,11 @@ static bool disk_unlock_native_capacity(struct gendisk *disk)
|
|||
}
|
||||
}
|
||||
|
||||
int rescan_partitions(struct gendisk *disk, struct block_device *bdev)
|
||||
static int drop_partitions(struct gendisk *disk, struct block_device *bdev)
|
||||
{
|
||||
struct parsed_partitions *state = NULL;
|
||||
struct disk_part_iter piter;
|
||||
struct hd_struct *part;
|
||||
int p, highest, res;
|
||||
rescan:
|
||||
if (state && !IS_ERR(state)) {
|
||||
kfree(state);
|
||||
state = NULL;
|
||||
}
|
||||
int res;
|
||||
|
||||
if (bdev->bd_part_count)
|
||||
return -EBUSY;
|
||||
|
@ -412,6 +406,24 @@ rescan:
|
|||
delete_partition(disk, part->partno);
|
||||
disk_part_iter_exit(&piter);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int rescan_partitions(struct gendisk *disk, struct block_device *bdev)
|
||||
{
|
||||
struct parsed_partitions *state = NULL;
|
||||
struct hd_struct *part;
|
||||
int p, highest, res;
|
||||
rescan:
|
||||
if (state && !IS_ERR(state)) {
|
||||
kfree(state);
|
||||
state = NULL;
|
||||
}
|
||||
|
||||
res = drop_partitions(disk, bdev);
|
||||
if (res)
|
||||
return res;
|
||||
|
||||
if (disk->fops->revalidate_disk)
|
||||
disk->fops->revalidate_disk(disk);
|
||||
check_disk_size_change(disk, bdev);
|
||||
|
@ -515,6 +527,26 @@ rescan:
|
|||
return 0;
|
||||
}
|
||||
|
||||
int invalidate_partitions(struct gendisk *disk, struct block_device *bdev)
|
||||
{
|
||||
int res;
|
||||
|
||||
if (!bdev->bd_invalidated)
|
||||
return 0;
|
||||
|
||||
res = drop_partitions(disk, bdev);
|
||||
if (res)
|
||||
return res;
|
||||
|
||||
set_capacity(disk, 0);
|
||||
check_disk_size_change(disk, bdev);
|
||||
bdev->bd_invalidated = 0;
|
||||
/* tell userspace that the media / partition table may have changed */
|
||||
kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p)
|
||||
{
|
||||
struct address_space *mapping = bdev->bd_inode->i_mapping;
|
||||
|
|
|
@ -1177,7 +1177,8 @@ static bool DAC960_V1_EnableMemoryMailboxInterface(DAC960_Controller_T
|
|||
int TimeoutCounter;
|
||||
int i;
|
||||
|
||||
|
||||
memset(&CommandMailbox, 0, sizeof(DAC960_V1_CommandMailbox_T));
|
||||
|
||||
if (pci_set_dma_mask(Controller->PCIDevice, DMA_BIT_MASK(32)))
|
||||
return DAC960_Failure(Controller, "DMA mask out of range");
|
||||
Controller->BounceBufferLimit = DMA_BIT_MASK(32);
|
||||
|
@ -4627,7 +4628,8 @@ static void DAC960_V2_ProcessCompletedCommand(DAC960_Command_T *Command)
|
|||
DAC960_Controller_T *Controller = Command->Controller;
|
||||
DAC960_CommandType_T CommandType = Command->CommandType;
|
||||
DAC960_V2_CommandMailbox_T *CommandMailbox = &Command->V2.CommandMailbox;
|
||||
DAC960_V2_IOCTL_Opcode_T CommandOpcode = CommandMailbox->Common.IOCTL_Opcode;
|
||||
DAC960_V2_IOCTL_Opcode_T IOCTLOpcode = CommandMailbox->Common.IOCTL_Opcode;
|
||||
DAC960_V2_CommandOpcode_T CommandOpcode = CommandMailbox->SCSI_10.CommandOpcode;
|
||||
DAC960_V2_CommandStatus_T CommandStatus = Command->V2.CommandStatus;
|
||||
|
||||
if (CommandType == DAC960_ReadCommand ||
|
||||
|
@ -4699,7 +4701,7 @@ static void DAC960_V2_ProcessCompletedCommand(DAC960_Command_T *Command)
|
|||
{
|
||||
if (Controller->ShutdownMonitoringTimer)
|
||||
return;
|
||||
if (CommandOpcode == DAC960_V2_GetControllerInfo)
|
||||
if (IOCTLOpcode == DAC960_V2_GetControllerInfo)
|
||||
{
|
||||
DAC960_V2_ControllerInfo_T *NewControllerInfo =
|
||||
Controller->V2.NewControllerInformation;
|
||||
|
@ -4719,14 +4721,14 @@ static void DAC960_V2_ProcessCompletedCommand(DAC960_Command_T *Command)
|
|||
memcpy(ControllerInfo, NewControllerInfo,
|
||||
sizeof(DAC960_V2_ControllerInfo_T));
|
||||
}
|
||||
else if (CommandOpcode == DAC960_V2_GetEvent)
|
||||
else if (IOCTLOpcode == DAC960_V2_GetEvent)
|
||||
{
|
||||
if (CommandStatus == DAC960_V2_NormalCompletion) {
|
||||
DAC960_V2_ReportEvent(Controller, Controller->V2.Event);
|
||||
}
|
||||
Controller->V2.NextEventSequenceNumber++;
|
||||
}
|
||||
else if (CommandOpcode == DAC960_V2_GetPhysicalDeviceInfoValid &&
|
||||
else if (IOCTLOpcode == DAC960_V2_GetPhysicalDeviceInfoValid &&
|
||||
CommandStatus == DAC960_V2_NormalCompletion)
|
||||
{
|
||||
DAC960_V2_PhysicalDeviceInfo_T *NewPhysicalDeviceInfo =
|
||||
|
@ -4915,7 +4917,7 @@ static void DAC960_V2_ProcessCompletedCommand(DAC960_Command_T *Command)
|
|||
NewPhysicalDeviceInfo->LogicalUnit++;
|
||||
Controller->V2.PhysicalDeviceIndex++;
|
||||
}
|
||||
else if (CommandOpcode == DAC960_V2_GetPhysicalDeviceInfoValid)
|
||||
else if (IOCTLOpcode == DAC960_V2_GetPhysicalDeviceInfoValid)
|
||||
{
|
||||
unsigned int DeviceIndex;
|
||||
for (DeviceIndex = Controller->V2.PhysicalDeviceIndex;
|
||||
|
@ -4938,7 +4940,7 @@ static void DAC960_V2_ProcessCompletedCommand(DAC960_Command_T *Command)
|
|||
}
|
||||
Controller->V2.NeedPhysicalDeviceInformation = false;
|
||||
}
|
||||
else if (CommandOpcode == DAC960_V2_GetLogicalDeviceInfoValid &&
|
||||
else if (IOCTLOpcode == DAC960_V2_GetLogicalDeviceInfoValid &&
|
||||
CommandStatus == DAC960_V2_NormalCompletion)
|
||||
{
|
||||
DAC960_V2_LogicalDeviceInfo_T *NewLogicalDeviceInfo =
|
||||
|
@ -5065,7 +5067,7 @@ static void DAC960_V2_ProcessCompletedCommand(DAC960_Command_T *Command)
|
|||
[LogicalDeviceNumber] = true;
|
||||
NewLogicalDeviceInfo->LogicalDeviceNumber++;
|
||||
}
|
||||
else if (CommandOpcode == DAC960_V2_GetLogicalDeviceInfoValid)
|
||||
else if (IOCTLOpcode == DAC960_V2_GetLogicalDeviceInfoValid)
|
||||
{
|
||||
int LogicalDriveNumber;
|
||||
for (LogicalDriveNumber = 0;
|
||||
|
|
|
@ -1120,7 +1120,7 @@ static inline void carm_handle_resp(struct carm_host *host,
|
|||
break;
|
||||
case MISC_GET_FW_VER: {
|
||||
struct carm_fw_ver *ver = (struct carm_fw_ver *)
|
||||
mem + sizeof(struct carm_msg_get_fw_ver);
|
||||
(mem + sizeof(struct carm_msg_get_fw_ver));
|
||||
if (!error) {
|
||||
host->fw_ver = le32_to_cpu(ver->version);
|
||||
host->flags |= (ver->features & FL_FW_VER_MASK);
|
||||
|
|
|
@ -1934,7 +1934,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
|
|||
}
|
||||
|
||||
if (bp->port.pmf)
|
||||
bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 0);
|
||||
bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_DCB_CONFIGURED, 0);
|
||||
else
|
||||
bnx2x__link_status_update(bp);
|
||||
|
||||
|
|
|
@ -1179,10 +1179,16 @@ static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
|
|||
*/
|
||||
static inline u8 bnx2x_stats_id(struct bnx2x_fastpath *fp)
|
||||
{
|
||||
if (!CHIP_IS_E1x(fp->bp))
|
||||
struct bnx2x *bp = fp->bp;
|
||||
if (!CHIP_IS_E1x(bp)) {
|
||||
#ifdef BCM_CNIC
|
||||
/* there are special statistics counters for FCoE 136..140 */
|
||||
if (IS_FCOE_FP(fp))
|
||||
return bp->cnic_base_cl_id + (bp->pf_num >> 1);
|
||||
#endif
|
||||
return fp->cl_id;
|
||||
else
|
||||
return fp->cl_id + BP_PORT(fp->bp) * FP_SB_MAX_E1x;
|
||||
}
|
||||
return fp->cl_id + BP_PORT(bp) * FP_SB_MAX_E1x;
|
||||
}
|
||||
|
||||
static inline void bnx2x_init_vlan_mac_fp_objs(struct bnx2x_fastpath *fp,
|
||||
|
|
|
@ -735,7 +735,9 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
|
|||
bp->dcbx_error);
|
||||
|
||||
/* mark DCBX result for PMF migration */
|
||||
bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 1);
|
||||
bnx2x_update_drv_flags(bp,
|
||||
1 << DRV_FLAGS_DCB_CONFIGURED,
|
||||
1);
|
||||
#ifdef BCM_DCBNL
|
||||
/*
|
||||
* Add new app tlvs to dcbnl
|
||||
|
@ -1020,7 +1022,7 @@ void bnx2x_dcbx_init(struct bnx2x *bp)
|
|||
DP(NETIF_MSG_LINK, "dcbx_lldp_params_offset 0x%x\n",
|
||||
dcbx_lldp_params_offset);
|
||||
|
||||
bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 0);
|
||||
bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_DCB_CONFIGURED, 0);
|
||||
|
||||
if (SHMEM_LLDP_DCBX_PARAMS_NONE != dcbx_lldp_params_offset) {
|
||||
bnx2x_dcbx_admin_mib_updated_params(bp,
|
||||
|
@ -1857,7 +1859,7 @@ void bnx2x_dcbx_pmf_update(struct bnx2x *bp)
|
|||
* read it from shmem and update bp and netdev accordingly
|
||||
*/
|
||||
if (SHMEM2_HAS(bp, drv_flags) &&
|
||||
GET_FLAGS(SHMEM2_RD(bp, drv_flags), DRV_FLAGS_DCB_CONFIGURED)) {
|
||||
GET_FLAGS(SHMEM2_RD(bp, drv_flags), 1 << DRV_FLAGS_DCB_CONFIGURED)) {
|
||||
/* Read neg results if dcbx is in the FW */
|
||||
if (bnx2x_dcbx_read_shmem_neg_results(bp))
|
||||
return;
|
||||
|
|
|
@ -5601,7 +5601,7 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp,
|
|||
|
||||
/* Fill the ramrod data with provided parameters */
|
||||
rdata->function_mode = cpu_to_le16(start_params->mf_mode);
|
||||
rdata->sd_vlan_tag = start_params->sd_vlan_tag;
|
||||
rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag);
|
||||
rdata->path_id = BP_PATH(bp);
|
||||
rdata->network_cos_mode = start_params->network_cos_mode;
|
||||
|
||||
|
|
|
@ -554,23 +554,11 @@ static void bnx2x_bmac_stats_update(struct bnx2x *bp)
|
|||
UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);
|
||||
|
||||
/* collect PFC stats */
|
||||
DIFF_64(diff.hi, new->tx_stat_gtpp_hi,
|
||||
pstats->pfc_frames_tx_hi,
|
||||
diff.lo, new->tx_stat_gtpp_lo,
|
||||
pstats->pfc_frames_tx_lo);
|
||||
pstats->pfc_frames_tx_hi = new->tx_stat_gtpp_hi;
|
||||
pstats->pfc_frames_tx_lo = new->tx_stat_gtpp_lo;
|
||||
ADD_64(pstats->pfc_frames_tx_hi, diff.hi,
|
||||
pstats->pfc_frames_tx_lo, diff.lo);
|
||||
|
||||
DIFF_64(diff.hi, new->rx_stat_grpp_hi,
|
||||
pstats->pfc_frames_rx_hi,
|
||||
diff.lo, new->rx_stat_grpp_lo,
|
||||
pstats->pfc_frames_rx_lo);
|
||||
pstats->pfc_frames_rx_hi = new->rx_stat_grpp_hi;
|
||||
pstats->pfc_frames_rx_lo = new->rx_stat_grpp_lo;
|
||||
ADD_64(pstats->pfc_frames_rx_hi, diff.hi,
|
||||
pstats->pfc_frames_rx_lo, diff.lo);
|
||||
}
|
||||
|
||||
estats->pause_frames_received_hi =
|
||||
|
|
|
@ -359,7 +359,7 @@ static void tun_free_netdev(struct net_device *dev)
|
|||
{
|
||||
struct tun_struct *tun = netdev_priv(dev);
|
||||
|
||||
sock_put(tun->socket.sk);
|
||||
sk_release_kernel(tun->socket.sk);
|
||||
}
|
||||
|
||||
/* Net device open. */
|
||||
|
@ -980,10 +980,18 @@ static int tun_recvmsg(struct kiocb *iocb, struct socket *sock,
|
|||
return ret;
|
||||
}
|
||||
|
||||
static int tun_release(struct socket *sock)
|
||||
{
|
||||
if (sock->sk)
|
||||
sock_put(sock->sk);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Ops structure to mimic raw sockets with tun */
|
||||
static const struct proto_ops tun_socket_ops = {
|
||||
.sendmsg = tun_sendmsg,
|
||||
.recvmsg = tun_recvmsg,
|
||||
.release = tun_release,
|
||||
};
|
||||
|
||||
static struct proto tun_proto = {
|
||||
|
@ -1110,10 +1118,11 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
|
|||
tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
|
||||
|
||||
err = -ENOMEM;
|
||||
sk = sk_alloc(net, AF_UNSPEC, GFP_KERNEL, &tun_proto);
|
||||
sk = sk_alloc(&init_net, AF_UNSPEC, GFP_KERNEL, &tun_proto);
|
||||
if (!sk)
|
||||
goto err_free_dev;
|
||||
|
||||
sk_change_net(sk, net);
|
||||
tun->socket.wq = &tun->wq;
|
||||
init_waitqueue_head(&tun->wq.wait);
|
||||
tun->socket.ops = &tun_socket_ops;
|
||||
|
@ -1174,7 +1183,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
|
|||
return 0;
|
||||
|
||||
err_free_sk:
|
||||
sock_put(sk);
|
||||
tun_free_netdev(dev);
|
||||
err_free_dev:
|
||||
free_netdev(dev);
|
||||
failed:
|
||||
|
|
|
@ -1598,6 +1598,10 @@ static const struct usb_device_id products [] = {
|
|||
// Sitecom LN-029 "USB 2.0 10/100 Ethernet adapter"
|
||||
USB_DEVICE (0x6189, 0x182d),
|
||||
.driver_info = (unsigned long) &ax8817x_info,
|
||||
}, {
|
||||
// Sitecom LN-031 "USB 2.0 10/100/1000 Ethernet adapter"
|
||||
USB_DEVICE (0x0df6, 0x0056),
|
||||
.driver_info = (unsigned long) &ax88178_info,
|
||||
}, {
|
||||
// corega FEther USB2-TX
|
||||
USB_DEVICE (0x07aa, 0x0017),
|
||||
|
|
|
@ -2475,7 +2475,7 @@ il3945_bg_alive_start(struct work_struct *data)
|
|||
container_of(data, struct il_priv, alive_start.work);
|
||||
|
||||
mutex_lock(&il->mutex);
|
||||
if (test_bit(S_EXIT_PENDING, &il->status))
|
||||
if (test_bit(S_EXIT_PENDING, &il->status) || il->txq == NULL)
|
||||
goto out;
|
||||
|
||||
il3945_alive_start(il);
|
||||
|
|
|
@ -1870,11 +1870,12 @@ il3945_bg_reg_txpower_periodic(struct work_struct *work)
|
|||
struct il_priv *il = container_of(work, struct il_priv,
|
||||
_3945.thermal_periodic.work);
|
||||
|
||||
if (test_bit(S_EXIT_PENDING, &il->status))
|
||||
return;
|
||||
|
||||
mutex_lock(&il->mutex);
|
||||
if (test_bit(S_EXIT_PENDING, &il->status) || il->txq == NULL)
|
||||
goto out;
|
||||
|
||||
il3945_reg_txpower_periodic(il);
|
||||
out:
|
||||
mutex_unlock(&il->mutex);
|
||||
}
|
||||
|
||||
|
|
|
@ -426,10 +426,14 @@ void rt2x00lib_txdone(struct queue_entry *entry,
|
|||
/*
|
||||
* If the data queue was below the threshold before the txdone
|
||||
* handler we must make sure the packet queue in the mac80211 stack
|
||||
* is reenabled when the txdone handler has finished.
|
||||
* is reenabled when the txdone handler has finished. This has to be
|
||||
* serialized with rt2x00mac_tx(), otherwise we can wake up queue
|
||||
* before it was stopped.
|
||||
*/
|
||||
spin_lock_bh(&entry->queue->tx_lock);
|
||||
if (!rt2x00queue_threshold(entry->queue))
|
||||
rt2x00queue_unpause_queue(entry->queue);
|
||||
spin_unlock_bh(&entry->queue->tx_lock);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(rt2x00lib_txdone);
|
||||
|
||||
|
|
|
@ -152,13 +152,22 @@ void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
|
|||
if (unlikely(rt2x00queue_write_tx_frame(queue, skb, false)))
|
||||
goto exit_fail;
|
||||
|
||||
/*
|
||||
* Pausing queue has to be serialized with rt2x00lib_txdone(). Note
|
||||
* we should not use spin_lock_bh variant as bottom halve was already
|
||||
* disabled before ieee80211_xmit() call.
|
||||
*/
|
||||
spin_lock(&queue->tx_lock);
|
||||
if (rt2x00queue_threshold(queue))
|
||||
rt2x00queue_pause_queue(queue);
|
||||
spin_unlock(&queue->tx_lock);
|
||||
|
||||
return;
|
||||
|
||||
exit_fail:
|
||||
spin_lock(&queue->tx_lock);
|
||||
rt2x00queue_pause_queue(queue);
|
||||
spin_unlock(&queue->tx_lock);
|
||||
exit_free_skb:
|
||||
ieee80211_free_txskb(hw, skb);
|
||||
}
|
||||
|
|
|
@ -619,6 +619,9 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
|
|||
else if (test_bit(REQUIRE_DMA, &queue->rt2x00dev->cap_flags))
|
||||
rt2x00queue_align_frame(skb);
|
||||
|
||||
/*
|
||||
* That function must be called with bh disabled.
|
||||
*/
|
||||
spin_lock(&queue->tx_lock);
|
||||
|
||||
if (unlikely(rt2x00queue_full(queue))) {
|
||||
|
|
|
@ -500,6 +500,9 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev)
|
|||
int pos;
|
||||
u32 reg32;
|
||||
|
||||
if (aspm_disabled)
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* Some functions in a slot might not all be PCIe functions,
|
||||
* very strange. Disable ASPM for the whole slot
|
||||
|
|
|
@ -143,6 +143,30 @@ config FUJITSU_LAPTOP_DEBUG
|
|||
|
||||
If you are not sure, say N here.
|
||||
|
||||
config FUJITSU_TABLET
|
||||
tristate "Fujitsu Tablet Extras"
|
||||
depends on ACPI
|
||||
depends on INPUT
|
||||
---help---
|
||||
This is a driver for tablets built by Fujitsu:
|
||||
|
||||
* Lifebook P1510/P1610/P1620/Txxxx
|
||||
* Stylistic ST5xxx
|
||||
* Possibly other Fujitsu tablet models
|
||||
|
||||
It adds support for the panel buttons, docking station detection,
|
||||
tablet/notebook mode detection for convertible and
|
||||
orientation detection for docked slates.
|
||||
|
||||
If you have a Fujitsu convertible or slate, say Y or M here.
|
||||
|
||||
config AMILO_RFKILL
|
||||
tristate "Fujitsu-Siemens Amilo rfkill support"
|
||||
depends on RFKILL
|
||||
---help---
|
||||
This is a driver for enabling wifi on some Fujitsu-Siemens Amilo
|
||||
laptops.
|
||||
|
||||
config TC1100_WMI
|
||||
tristate "HP Compaq TC1100 Tablet WMI Extras (EXPERIMENTAL)"
|
||||
depends on !X86_64
|
||||
|
|
|
@ -17,12 +17,14 @@ obj-$(CONFIG_ACER_WMI) += acer-wmi.o
|
|||
obj-$(CONFIG_ACERHDF) += acerhdf.o
|
||||
obj-$(CONFIG_HP_ACCEL) += hp_accel.o
|
||||
obj-$(CONFIG_HP_WMI) += hp-wmi.o
|
||||
obj-$(CONFIG_AMILO_RFKILL) += amilo-rfkill.o
|
||||
obj-$(CONFIG_TC1100_WMI) += tc1100-wmi.o
|
||||
obj-$(CONFIG_SONY_LAPTOP) += sony-laptop.o
|
||||
obj-$(CONFIG_IDEAPAD_LAPTOP) += ideapad-laptop.o
|
||||
obj-$(CONFIG_THINKPAD_ACPI) += thinkpad_acpi.o
|
||||
obj-$(CONFIG_SENSORS_HDAPS) += hdaps.o
|
||||
obj-$(CONFIG_FUJITSU_LAPTOP) += fujitsu-laptop.o
|
||||
obj-$(CONFIG_FUJITSU_TABLET) += fujitsu-tablet.o
|
||||
obj-$(CONFIG_PANASONIC_LAPTOP) += panasonic-laptop.o
|
||||
obj-$(CONFIG_INTEL_MENLOW) += intel_menlow.o
|
||||
obj-$(CONFIG_ACPI_WMI) += wmi.o
|
||||
|
|
|
@ -679,6 +679,32 @@ static acpi_status AMW0_find_mailled(void)
|
|||
return AE_OK;
|
||||
}
|
||||
|
||||
static int AMW0_set_cap_acpi_check_device_found;
|
||||
|
||||
static acpi_status AMW0_set_cap_acpi_check_device_cb(acpi_handle handle,
|
||||
u32 level, void *context, void **retval)
|
||||
{
|
||||
AMW0_set_cap_acpi_check_device_found = 1;
|
||||
return AE_OK;
|
||||
}
|
||||
|
||||
static const struct acpi_device_id norfkill_ids[] = {
|
||||
{ "VPC2004", 0},
|
||||
{ "IBM0068", 0},
|
||||
{ "LEN0068", 0},
|
||||
{ "", 0},
|
||||
};
|
||||
|
||||
static int AMW0_set_cap_acpi_check_device(void)
|
||||
{
|
||||
const struct acpi_device_id *id;
|
||||
|
||||
for (id = norfkill_ids; id->id[0]; id++)
|
||||
acpi_get_devices(id->id, AMW0_set_cap_acpi_check_device_cb,
|
||||
NULL, NULL);
|
||||
return AMW0_set_cap_acpi_check_device_found;
|
||||
}
|
||||
|
||||
static acpi_status AMW0_set_capabilities(void)
|
||||
{
|
||||
struct wmab_args args;
|
||||
|
@ -692,7 +718,9 @@ static acpi_status AMW0_set_capabilities(void)
|
|||
* work.
|
||||
*/
|
||||
if (wmi_has_guid(AMW0_GUID2)) {
|
||||
interface->capability |= ACER_CAP_WIRELESS;
|
||||
if ((quirks != &quirk_unknown) ||
|
||||
!AMW0_set_cap_acpi_check_device())
|
||||
interface->capability |= ACER_CAP_WIRELESS;
|
||||
return AE_OK;
|
||||
}
|
||||
|
||||
|
|
|
@ -0,0 +1,173 @@
|
|||
/*
|
||||
* Support for rfkill on some Fujitsu-Siemens Amilo laptops.
|
||||
* Copyright 2011 Ben Hutchings.
|
||||
*
|
||||
* Based in part on the fsam7440 driver, which is:
|
||||
* Copyright 2005 Alejandro Vidal Mata & Javier Vidal Mata.
|
||||
* and on the fsaa1655g driver, which is:
|
||||
* Copyright 2006 Martin Večeřa.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/dmi.h>
|
||||
#include <linux/i8042.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/moduleparam.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/rfkill.h>
|
||||
|
||||
/*
|
||||
* These values were obtained from disassembling and debugging the
|
||||
* PM.exe program installed in the Fujitsu-Siemens AMILO A1655G
|
||||
*/
|
||||
#define A1655_WIFI_COMMAND 0x10C5
|
||||
#define A1655_WIFI_ON 0x25
|
||||
#define A1655_WIFI_OFF 0x45
|
||||
|
||||
static int amilo_a1655_rfkill_set_block(void *data, bool blocked)
|
||||
{
|
||||
u8 param = blocked ? A1655_WIFI_OFF : A1655_WIFI_ON;
|
||||
int rc;
|
||||
|
||||
i8042_lock_chip();
|
||||
rc = i8042_command(&param, A1655_WIFI_COMMAND);
|
||||
i8042_unlock_chip();
|
||||
return rc;
|
||||
}
|
||||
|
||||
static const struct rfkill_ops amilo_a1655_rfkill_ops = {
|
||||
.set_block = amilo_a1655_rfkill_set_block
|
||||
};
|
||||
|
||||
/*
|
||||
* These values were obtained from disassembling the PM.exe program
|
||||
* installed in the Fujitsu-Siemens AMILO M 7440
|
||||
*/
|
||||
#define M7440_PORT1 0x118f
|
||||
#define M7440_PORT2 0x118e
|
||||
#define M7440_RADIO_ON1 0x12
|
||||
#define M7440_RADIO_ON2 0x80
|
||||
#define M7440_RADIO_OFF1 0x10
|
||||
#define M7440_RADIO_OFF2 0x00
|
||||
|
||||
static int amilo_m7440_rfkill_set_block(void *data, bool blocked)
|
||||
{
|
||||
u8 val1 = blocked ? M7440_RADIO_OFF1 : M7440_RADIO_ON1;
|
||||
u8 val2 = blocked ? M7440_RADIO_OFF2 : M7440_RADIO_ON2;
|
||||
|
||||
outb(val1, M7440_PORT1);
|
||||
outb(val2, M7440_PORT2);
|
||||
|
||||
/* Check whether the state has changed correctly */
|
||||
if (inb(M7440_PORT1) != val1 || inb(M7440_PORT2) != val2)
|
||||
return -EIO;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct rfkill_ops amilo_m7440_rfkill_ops = {
|
||||
.set_block = amilo_m7440_rfkill_set_block
|
||||
};
|
||||
|
||||
static const struct dmi_system_id __devinitdata amilo_rfkill_id_table[] = {
|
||||
{
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
|
||||
DMI_MATCH(DMI_BOARD_NAME, "AMILO A1655"),
|
||||
},
|
||||
.driver_data = (void *)&amilo_a1655_rfkill_ops
|
||||
},
|
||||
{
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
|
||||
DMI_MATCH(DMI_BOARD_NAME, "AMILO M7440"),
|
||||
},
|
||||
.driver_data = (void *)&amilo_m7440_rfkill_ops
|
||||
},
|
||||
{}
|
||||
};
|
||||
|
||||
static struct platform_device *amilo_rfkill_pdev;
|
||||
static struct rfkill *amilo_rfkill_dev;
|
||||
|
||||
static int __devinit amilo_rfkill_probe(struct platform_device *device)
|
||||
{
|
||||
const struct dmi_system_id *system_id =
|
||||
dmi_first_match(amilo_rfkill_id_table);
|
||||
int rc;
|
||||
|
||||
amilo_rfkill_dev = rfkill_alloc(KBUILD_MODNAME, &device->dev,
|
||||
RFKILL_TYPE_WLAN,
|
||||
system_id->driver_data, NULL);
|
||||
if (!amilo_rfkill_dev)
|
||||
return -ENOMEM;
|
||||
|
||||
rc = rfkill_register(amilo_rfkill_dev);
|
||||
if (rc)
|
||||
goto fail;
|
||||
|
||||
return 0;
|
||||
|
||||
fail:
|
||||
rfkill_destroy(amilo_rfkill_dev);
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int amilo_rfkill_remove(struct platform_device *device)
|
||||
{
|
||||
rfkill_unregister(amilo_rfkill_dev);
|
||||
rfkill_destroy(amilo_rfkill_dev);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct platform_driver amilo_rfkill_driver = {
|
||||
.driver = {
|
||||
.name = KBUILD_MODNAME,
|
||||
.owner = THIS_MODULE,
|
||||
},
|
||||
.probe = amilo_rfkill_probe,
|
||||
.remove = amilo_rfkill_remove,
|
||||
};
|
||||
|
||||
static int __init amilo_rfkill_init(void)
|
||||
{
|
||||
int rc;
|
||||
|
||||
if (dmi_first_match(amilo_rfkill_id_table) == NULL)
|
||||
return -ENODEV;
|
||||
|
||||
rc = platform_driver_register(&amilo_rfkill_driver);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
amilo_rfkill_pdev = platform_device_register_simple(KBUILD_MODNAME, -1,
|
||||
NULL, 0);
|
||||
if (IS_ERR(amilo_rfkill_pdev)) {
|
||||
rc = PTR_ERR(amilo_rfkill_pdev);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
fail:
|
||||
platform_driver_unregister(&amilo_rfkill_driver);
|
||||
return rc;
|
||||
}
|
||||
|
||||
static void __exit amilo_rfkill_exit(void)
|
||||
{
|
||||
platform_device_unregister(amilo_rfkill_pdev);
|
||||
platform_driver_unregister(&amilo_rfkill_driver);
|
||||
}
|
||||
|
||||
MODULE_AUTHOR("Ben Hutchings <ben@decadent.org.uk>");
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_DEVICE_TABLE(dmi, amilo_rfkill_id_table);
|
||||
|
||||
module_init(amilo_rfkill_init);
|
||||
module_exit(amilo_rfkill_exit);
|
|
@ -0,0 +1,478 @@
|
|||
/*
|
||||
* Copyright (C) 2006-2012 Robert Gerlach <khnz@gmx.de>
|
||||
* Copyright (C) 2005-2006 Jan Rychter <jan@rychter.com>
|
||||
*
|
||||
* You can redistribute and/or modify this program under the terms of the
|
||||
* GNU General Public License version 2 as published by the Free Software
|
||||
* Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
|
||||
* Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along
|
||||
* with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
* 59 Temple Place Suite 330, Boston, MA 02111-1307, USA.
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/ioport.h>
|
||||
#include <linux/acpi.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/input.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/dmi.h>
|
||||
|
||||
#define MODULENAME "fujitsu-tablet"
|
||||
|
||||
#define ACPI_FUJITSU_CLASS "fujitsu"
|
||||
|
||||
#define INVERT_TABLET_MODE_BIT 0x01
|
||||
#define FORCE_TABLET_MODE_IF_UNDOCK 0x02
|
||||
|
||||
#define KEYMAP_LEN 16
|
||||
|
||||
static const struct acpi_device_id fujitsu_ids[] = {
|
||||
{ .id = "FUJ02BD" },
|
||||
{ .id = "FUJ02BF" },
|
||||
{ .id = "" }
|
||||
};
|
||||
|
||||
struct fujitsu_config {
|
||||
unsigned short keymap[KEYMAP_LEN];
|
||||
unsigned int quirks;
|
||||
};
|
||||
|
||||
static unsigned short keymap_Lifebook_Tseries[KEYMAP_LEN] __initconst = {
|
||||
KEY_RESERVED,
|
||||
KEY_RESERVED,
|
||||
KEY_RESERVED,
|
||||
KEY_RESERVED,
|
||||
KEY_SCROLLDOWN,
|
||||
KEY_SCROLLUP,
|
||||
KEY_DIRECTION,
|
||||
KEY_LEFTCTRL,
|
||||
KEY_BRIGHTNESSUP,
|
||||
KEY_BRIGHTNESSDOWN,
|
||||
KEY_BRIGHTNESS_ZERO,
|
||||
KEY_RESERVED,
|
||||
KEY_RESERVED,
|
||||
KEY_RESERVED,
|
||||
KEY_RESERVED,
|
||||
KEY_LEFTALT
|
||||
};
|
||||
|
||||
static unsigned short keymap_Lifebook_U810[KEYMAP_LEN] __initconst = {
|
||||
KEY_RESERVED,
|
||||
KEY_RESERVED,
|
||||
KEY_RESERVED,
|
||||
KEY_RESERVED,
|
||||
KEY_PROG1,
|
||||
KEY_PROG2,
|
||||
KEY_DIRECTION,
|
||||
KEY_RESERVED,
|
||||
KEY_RESERVED,
|
||||
KEY_RESERVED,
|
||||
KEY_UP,
|
||||
KEY_DOWN,
|
||||
KEY_RESERVED,
|
||||
KEY_RESERVED,
|
||||
KEY_LEFTCTRL,
|
||||
KEY_LEFTALT
|
||||
};
|
||||
|
||||
static unsigned short keymap_Stylistic_Tseries[KEYMAP_LEN] __initconst = {
|
||||
KEY_RESERVED,
|
||||
KEY_RESERVED,
|
||||
KEY_RESERVED,
|
||||
KEY_RESERVED,
|
||||
KEY_PRINT,
|
||||
KEY_BACKSPACE,
|
||||
KEY_SPACE,
|
||||
KEY_ENTER,
|
||||
KEY_BRIGHTNESSUP,
|
||||
KEY_BRIGHTNESSDOWN,
|
||||
KEY_DOWN,
|
||||
KEY_UP,
|
||||
KEY_SCROLLUP,
|
||||
KEY_SCROLLDOWN,
|
||||
KEY_LEFTCTRL,
|
||||
KEY_LEFTALT
|
||||
};
|
||||
|
||||
static unsigned short keymap_Stylistic_ST5xxx[KEYMAP_LEN] __initconst = {
|
||||
KEY_RESERVED,
|
||||
KEY_RESERVED,
|
||||
KEY_RESERVED,
|
||||
KEY_RESERVED,
|
||||
KEY_MAIL,
|
||||
KEY_DIRECTION,
|
||||
KEY_ESC,
|
||||
KEY_ENTER,
|
||||
KEY_BRIGHTNESSUP,
|
||||
KEY_BRIGHTNESSDOWN,
|
||||
KEY_DOWN,
|
||||
KEY_UP,
|
||||
KEY_SCROLLUP,
|
||||
KEY_SCROLLDOWN,
|
||||
KEY_LEFTCTRL,
|
||||
KEY_LEFTALT
|
||||
};
|
||||
|
||||
static struct {
|
||||
struct input_dev *idev;
|
||||
struct fujitsu_config config;
|
||||
unsigned long prev_keymask;
|
||||
|
||||
char phys[21];
|
||||
|
||||
int irq;
|
||||
int io_base;
|
||||
int io_length;
|
||||
} fujitsu;
|
||||
|
||||
static u8 fujitsu_ack(void)
|
||||
{
|
||||
return inb(fujitsu.io_base + 2);
|
||||
}
|
||||
|
||||
static u8 fujitsu_status(void)
|
||||
{
|
||||
return inb(fujitsu.io_base + 6);
|
||||
}
|
||||
|
||||
static u8 fujitsu_read_register(const u8 addr)
|
||||
{
|
||||
outb(addr, fujitsu.io_base);
|
||||
return inb(fujitsu.io_base + 4);
|
||||
}
|
||||
|
||||
static void fujitsu_send_state(void)
|
||||
{
|
||||
int state;
|
||||
int dock, tablet_mode;
|
||||
|
||||
state = fujitsu_read_register(0xdd);
|
||||
|
||||
dock = state & 0x02;
|
||||
|
||||
if ((fujitsu.config.quirks & FORCE_TABLET_MODE_IF_UNDOCK) && (!dock)) {
|
||||
tablet_mode = 1;
|
||||
} else{
|
||||
tablet_mode = state & 0x01;
|
||||
if (fujitsu.config.quirks & INVERT_TABLET_MODE_BIT)
|
||||
tablet_mode = !tablet_mode;
|
||||
}
|
||||
|
||||
input_report_switch(fujitsu.idev, SW_DOCK, dock);
|
||||
input_report_switch(fujitsu.idev, SW_TABLET_MODE, tablet_mode);
|
||||
input_sync(fujitsu.idev);
|
||||
}
|
||||
|
||||
static void fujitsu_reset(void)
|
||||
{
|
||||
int timeout = 50;
|
||||
|
||||
fujitsu_ack();
|
||||
|
||||
while ((fujitsu_status() & 0x02) && (--timeout))
|
||||
msleep(20);
|
||||
|
||||
fujitsu_send_state();
|
||||
}
|
||||
|
||||
static int __devinit input_fujitsu_setup(struct device *parent,
|
||||
const char *name, const char *phys)
|
||||
{
|
||||
struct input_dev *idev;
|
||||
int error;
|
||||
int i;
|
||||
|
||||
idev = input_allocate_device();
|
||||
if (!idev)
|
||||
return -ENOMEM;
|
||||
|
||||
idev->dev.parent = parent;
|
||||
idev->phys = phys;
|
||||
idev->name = name;
|
||||
idev->id.bustype = BUS_HOST;
|
||||
idev->id.vendor = 0x1734; /* Fujitsu Siemens Computer GmbH */
|
||||
idev->id.product = 0x0001;
|
||||
idev->id.version = 0x0101;
|
||||
|
||||
idev->keycode = fujitsu.config.keymap;
|
||||
idev->keycodesize = sizeof(fujitsu.config.keymap[0]);
|
||||
idev->keycodemax = ARRAY_SIZE(fujitsu.config.keymap);
|
||||
|
||||
__set_bit(EV_REP, idev->evbit);
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(fujitsu.config.keymap); i++)
|
||||
if (fujitsu.config.keymap[i])
|
||||
input_set_capability(idev, EV_KEY, fujitsu.config.keymap[i]);
|
||||
|
||||
input_set_capability(idev, EV_MSC, MSC_SCAN);
|
||||
|
||||
input_set_capability(idev, EV_SW, SW_DOCK);
|
||||
input_set_capability(idev, EV_SW, SW_TABLET_MODE);
|
||||
|
||||
input_set_capability(idev, EV_SW, SW_DOCK);
|
||||
input_set_capability(idev, EV_SW, SW_TABLET_MODE);
|
||||
|
||||
error = input_register_device(idev);
|
||||
if (error) {
|
||||
input_free_device(idev);
|
||||
return error;
|
||||
}
|
||||
|
||||
fujitsu.idev = idev;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void input_fujitsu_remove(void)
|
||||
{
|
||||
input_unregister_device(fujitsu.idev);
|
||||
}
|
||||
|
||||
static irqreturn_t fujitsu_interrupt(int irq, void *dev_id)
|
||||
{
|
||||
unsigned long keymask, changed;
|
||||
unsigned int keycode;
|
||||
int pressed;
|
||||
int i;
|
||||
|
||||
if (unlikely(!(fujitsu_status() & 0x01)))
|
||||
return IRQ_NONE;
|
||||
|
||||
fujitsu_send_state();
|
||||
|
||||
keymask = fujitsu_read_register(0xde);
|
||||
keymask |= fujitsu_read_register(0xdf) << 8;
|
||||
keymask ^= 0xffff;
|
||||
|
||||
changed = keymask ^ fujitsu.prev_keymask;
|
||||
if (changed) {
|
||||
fujitsu.prev_keymask = keymask;
|
||||
|
||||
for_each_set_bit(i, &changed, KEYMAP_LEN) {
|
||||
keycode = fujitsu.config.keymap[i];
|
||||
pressed = keymask & changed & BIT(i);
|
||||
|
||||
if (pressed)
|
||||
input_event(fujitsu.idev, EV_MSC, MSC_SCAN, i);
|
||||
|
||||
input_report_key(fujitsu.idev, keycode, pressed);
|
||||
input_sync(fujitsu.idev);
|
||||
}
|
||||
}
|
||||
|
||||
fujitsu_ack();
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
static int __devinit fujitsu_dmi_default(const struct dmi_system_id *dmi)
|
||||
{
|
||||
printk(KERN_INFO MODULENAME ": %s\n", dmi->ident);
|
||||
memcpy(fujitsu.config.keymap, dmi->driver_data,
|
||||
sizeof(fujitsu.config.keymap));
|
||||
return 1;
|
||||
}
|
||||
|
||||
static int __devinit fujitsu_dmi_stylistic(const struct dmi_system_id *dmi)
|
||||
{
|
||||
fujitsu_dmi_default(dmi);
|
||||
fujitsu.config.quirks |= FORCE_TABLET_MODE_IF_UNDOCK;
|
||||
fujitsu.config.quirks |= INVERT_TABLET_MODE_BIT;
|
||||
return 1;
|
||||
}
|
||||
|
||||
static struct dmi_system_id dmi_ids[] __initconst = {
|
||||
{
|
||||
.callback = fujitsu_dmi_default,
|
||||
.ident = "Fujitsu Siemens P/T Series",
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK")
|
||||
},
|
||||
.driver_data = keymap_Lifebook_Tseries
|
||||
},
|
||||
{
|
||||
.callback = fujitsu_dmi_default,
|
||||
.ident = "Fujitsu Lifebook T Series",
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook T")
|
||||
},
|
||||
		.driver_data = keymap_Lifebook_Tseries
	},
	{
		.callback = fujitsu_dmi_stylistic,
		.ident = "Fujitsu Siemens Stylistic T Series",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Stylistic T")
		},
		.driver_data = keymap_Stylistic_Tseries
	},
	{
		.callback = fujitsu_dmi_default,
		.ident = "Fujitsu LifeBook U810",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
			DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook U810")
		},
		.driver_data = keymap_Lifebook_U810
	},
	{
		.callback = fujitsu_dmi_stylistic,
		.ident = "Fujitsu Siemens Stylistic ST5xxx Series",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
			DMI_MATCH(DMI_PRODUCT_NAME, "STYLISTIC ST5")
		},
		.driver_data = keymap_Stylistic_ST5xxx
	},
	{
		.callback = fujitsu_dmi_stylistic,
		.ident = "Fujitsu Siemens Stylistic ST5xxx Series",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Stylistic ST5")
		},
		.driver_data = keymap_Stylistic_ST5xxx
	},
	{
		.callback = fujitsu_dmi_default,
		.ident = "Unknown (using defaults)",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, ""),
			DMI_MATCH(DMI_PRODUCT_NAME, "")
		},
		.driver_data = keymap_Lifebook_Tseries
	},
	{ NULL }
};

static acpi_status __devinit
fujitsu_walk_resources(struct acpi_resource *res, void *data)
{
	switch (res->type) {
	case ACPI_RESOURCE_TYPE_IRQ:
		fujitsu.irq = res->data.irq.interrupts[0];
		return AE_OK;

	case ACPI_RESOURCE_TYPE_IO:
		fujitsu.io_base = res->data.io.minimum;
		fujitsu.io_length = res->data.io.address_length;
		return AE_OK;

	case ACPI_RESOURCE_TYPE_END_TAG:
		if (fujitsu.irq && fujitsu.io_base)
			return AE_OK;
		else
			return AE_NOT_FOUND;

	default:
		return AE_ERROR;
	}
}

static int __devinit acpi_fujitsu_add(struct acpi_device *adev)
{
	acpi_status status;
	int error;

	if (!adev)
		return -EINVAL;

	status = acpi_walk_resources(adev->handle, METHOD_NAME__CRS,
			fujitsu_walk_resources, NULL);
	if (ACPI_FAILURE(status) || !fujitsu.irq || !fujitsu.io_base)
		return -ENODEV;

	sprintf(acpi_device_name(adev), "Fujitsu %s", acpi_device_hid(adev));
	sprintf(acpi_device_class(adev), "%s", ACPI_FUJITSU_CLASS);

	snprintf(fujitsu.phys, sizeof(fujitsu.phys),
			"%s/input0", acpi_device_hid(adev));

	error = input_fujitsu_setup(&adev->dev,
			acpi_device_name(adev), fujitsu.phys);
	if (error)
		return error;

	if (!request_region(fujitsu.io_base, fujitsu.io_length, MODULENAME)) {
		input_fujitsu_remove();
		return -EBUSY;
	}

	fujitsu_reset();

	error = request_irq(fujitsu.irq, fujitsu_interrupt,
			IRQF_SHARED, MODULENAME, fujitsu_interrupt);
	if (error) {
		release_region(fujitsu.io_base, fujitsu.io_length);
		input_fujitsu_remove();
		return error;
	}

	return 0;
}

static int __devexit acpi_fujitsu_remove(struct acpi_device *adev, int type)
{
	free_irq(fujitsu.irq, fujitsu_interrupt);
	release_region(fujitsu.io_base, fujitsu.io_length);
	input_fujitsu_remove();
	return 0;
}

static int acpi_fujitsu_resume(struct acpi_device *adev)
{
	fujitsu_reset();
	return 0;
}

static struct acpi_driver acpi_fujitsu_driver = {
	.name = MODULENAME,
	.class = "hotkey",
	.ids = fujitsu_ids,
	.ops = {
		.add = acpi_fujitsu_add,
		.remove = acpi_fujitsu_remove,
		.resume = acpi_fujitsu_resume,
	}
};

static int __init fujitsu_module_init(void)
{
	int error;

	dmi_check_system(dmi_ids);

	error = acpi_bus_register_driver(&acpi_fujitsu_driver);
	if (error)
		return error;

	return 0;
}

static void __exit fujitsu_module_exit(void)
{
	acpi_bus_unregister_driver(&acpi_fujitsu_driver);
}

module_init(fujitsu_module_init);
module_exit(fujitsu_module_exit);

MODULE_AUTHOR("Robert Gerlach <khnz@gmx.de>");
MODULE_DESCRIPTION("Fujitsu tablet pc extras driver");
MODULE_LICENSE("GPL");
MODULE_VERSION("2.4");

MODULE_DEVICE_TABLE(acpi, fujitsu_ids);

@@ -562,8 +562,8 @@ static int acpi_pcc_hotkey_add(struct acpi_device *device)

	num_sifr = acpi_pcc_get_sqty(device);

	if (num_sifr > 255) {
		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "num_sifr too large"));
	if (num_sifr < 0 || num_sifr > 255) {
		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "num_sifr out of range"));
		return -ENODEV;
	}

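The hunk above widens the sanity check because the original upper-bound-only test lets a negative error return slip through. A standalone userspace sketch of that pitfall (not driver code; get_count() and the -19 value are invented for illustration):

	/* Illustration only: a negative error return passes a ">" upper-bound
	 * check unless the lower bound is tested as well. */
	#include <stdio.h>

	static int get_count(void)
	{
		return -19;	/* pretend the ACPI query failed */
	}

	int main(void)
	{
		int n = get_count();

		if (n > 255)
			printf("old check would reject %d\n", n);
		else
			printf("old check lets %d through\n", n);

		if (n < 0 || n > 255)
			printf("new check rejects %d\n", n);
		return 0;
	}
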
@@ -226,7 +226,7 @@ static int da9052_regulator_set_voltage_int(struct regulator_dev *rdev,
	if (min_uV < info->min_uV)
		min_uV = info->min_uV;

	*selector = (min_uV - info->min_uV) / info->step_uV;
	*selector = DIV_ROUND_UP(min_uV - info->min_uV, info->step_uV);

	ret = da9052_list_voltage(rdev, *selector);
	if (ret < 0)

@@ -318,10 +318,10 @@ static int da9052_set_buckperi_voltage(struct regulator_dev *rdev, int min_uV,
	if ((regulator->da9052->chip_id == DA9052) &&
	    (min_uV >= DA9052_CONST_3uV))
		*selector = DA9052_BUCK_PERI_REG_MAP_UPTO_3uV +
			((min_uV - DA9052_CONST_3uV) /
			(DA9052_BUCK_PERI_3uV_STEP));
			DIV_ROUND_UP(min_uV - DA9052_CONST_3uV,
				     DA9052_BUCK_PERI_3uV_STEP);
	else
		*selector = (min_uV - info->min_uV) / info->step_uV;
		*selector = DIV_ROUND_UP(min_uV - info->min_uV, info->step_uV);

	ret = da9052_list_buckperi_voltage(rdev, *selector);
	if (ret < 0)

@@ -400,6 +400,7 @@ static struct regulator_ops da9052_ldo_ops = {
		.ops = &da9052_ldo5_6_ops,\
		.type = REGULATOR_VOLTAGE,\
		.id = _id,\
		.n_voltages = (max - min) / step + 1, \
		.owner = THIS_MODULE,\
	},\
	.min_uV = (min) * 1000,\

@@ -417,6 +418,7 @@ static struct regulator_ops da9052_ldo_ops = {
		.ops = &da9052_ldo_ops,\
		.type = REGULATOR_VOLTAGE,\
		.id = _id,\
		.n_voltages = (max - min) / step + 1, \
		.owner = THIS_MODULE,\
	},\
	.min_uV = (min) * 1000,\

@@ -434,6 +436,7 @@ static struct regulator_ops da9052_ldo_ops = {
		.ops = &da9052_dcdc_ops,\
		.type = REGULATOR_VOLTAGE,\
		.id = _id,\
		.n_voltages = (max - min) / step + 1, \
		.owner = THIS_MODULE,\
	},\
	.min_uV = (min) * 1000,\

@@ -451,6 +454,7 @@ static struct regulator_ops da9052_ldo_ops = {
		.ops = &da9052_buckperi_ops,\
		.type = REGULATOR_VOLTAGE,\
		.id = _id,\
		.n_voltages = (max - min) / step + 1, \
		.owner = THIS_MODULE,\
	},\
	.min_uV = (min) * 1000,\

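The DIV_ROUND_UP conversion in the hunks above matters because plain integer division rounds the selector down, so a request that falls between two voltage steps could be satisfied with a voltage below the stated minimum. A standalone userspace sketch of the difference (not driver code; the 600 mV base, 25 mV step and requested value are made-up numbers):

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		int min_uV = 600000, step_uV = 25000, req_uV = 612500;
		int down = (req_uV - min_uV) / step_uV;		/* old behaviour */
		int up = DIV_ROUND_UP(req_uV - min_uV, step_uV);	/* new behaviour */

		printf("requested %d uV\n", req_uV);
		printf("round-down selector %d -> %d uV (below the request)\n",
		       down, min_uV + down * step_uV);
		printf("round-up selector %d -> %d uV (meets the request)\n",
		       up, min_uV + up * step_uV);
		return 0;
	}

With these values the round-down selector lands 12.5 mV short of the request, while the rounded-up one never undershoots.
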
@@ -481,7 +481,7 @@ static int set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV,
	if (i >= info->n_voltages)
		i = info->n_voltages - 1;

	*selector = info->voltages[i];
	*selector = i;

	return write_field(hw, &info->voltage, i);
}

@@ -1710,6 +1710,8 @@ static int sci_startup(struct uart_port *port)

	dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);

	pm_runtime_put_noidle(port->dev);

	sci_port_enable(s);

	ret = sci_request_irq(s);

@@ -1737,6 +1739,8 @@ static void sci_shutdown(struct uart_port *port)
	sci_free_irq(s);

	sci_port_disable(s);

	pm_runtime_get_noresume(port->dev);
}

static unsigned int sci_scbrr_calc(unsigned int algo_id, unsigned int bps,

@@ -2075,6 +2079,7 @@ static int __devinit sci_init_single(struct platform_device *dev,
		sci_init_gpios(sci_port);

		pm_runtime_irq_safe(&dev->dev);
		pm_runtime_get_noresume(&dev->dev);
		pm_runtime_enable(&dev->dev);
	}

@@ -1183,8 +1183,12 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
			 * The latter is necessary to prevent ghost
			 * partitions on a removed medium.
			 */
			if (bdev->bd_invalidated && (!ret || ret == -ENOMEDIUM))
				rescan_partitions(disk, bdev);
			if (bdev->bd_invalidated) {
				if (!ret)
					rescan_partitions(disk, bdev);
				else if (ret == -ENOMEDIUM)
					invalidate_partitions(disk, bdev);
			}
			if (ret)
				goto out_clear;
		} else {

@@ -1214,8 +1218,12 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
		if (bdev->bd_disk->fops->open)
			ret = bdev->bd_disk->fops->open(bdev, mode);
		/* the same as first opener case, read comment there */
		if (bdev->bd_invalidated && (!ret || ret == -ENOMEDIUM))
			rescan_partitions(bdev->bd_disk, bdev);
		if (bdev->bd_invalidated) {
			if (!ret)
				rescan_partitions(bdev->bd_disk, bdev);
			else if (ret == -ENOMEDIUM)
				invalidate_partitions(bdev->bd_disk, bdev);
		}
		if (ret)
			goto out_unlock_bdev;
	}

@@ -920,16 +920,26 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
	for (lockp = &inode->i_flock; *lockp != NULL; \
	     lockp = &(*lockp)->fl_next)

struct lock_to_push {
	struct list_head llist;
	__u64 offset;
	__u64 length;
	__u32 pid;
	__u16 netfid;
	__u8 type;
};

static int
cifs_push_posix_locks(struct cifsFileInfo *cfile)
{
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct file_lock *flock, **before;
	struct cifsLockInfo *lck, *tmp;
	unsigned int count = 0, i = 0;
	int rc = 0, xid, type;
	struct list_head locks_to_send, *el;
	struct lock_to_push *lck, *tmp;
	__u64 length;
	struct list_head locks_to_send;

	xid = GetXid();

@@ -940,29 +950,55 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile)
		return rc;
	}

	INIT_LIST_HEAD(&locks_to_send);

	lock_flocks();
	cifs_for_each_lock(cfile->dentry->d_inode, before) {
		if ((*before)->fl_flags & FL_POSIX)
			count++;
	}
	unlock_flocks();

	INIT_LIST_HEAD(&locks_to_send);

	/*
	 * Allocating count locks is enough because no locks can be added to
	 * the list while we are holding cinode->lock_mutex that protects
	 * locking operations of this inode.
	 */
	for (; i < count; i++) {
		lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
		if (!lck) {
			rc = -ENOMEM;
			goto err_out;
		}
		list_add_tail(&lck->llist, &locks_to_send);
	}

	i = 0;
	el = locks_to_send.next;
	lock_flocks();
	cifs_for_each_lock(cfile->dentry->d_inode, before) {
		if (el == &locks_to_send) {
			/* something is really wrong */
			cERROR(1, "Can't push all brlocks!");
			break;
		}
		flock = *before;
		if ((flock->fl_flags & FL_POSIX) == 0)
			continue;
		length = 1 + flock->fl_end - flock->fl_start;
		if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
			type = CIFS_RDLCK;
		else
			type = CIFS_WRLCK;

		lck = cifs_lock_init(flock->fl_start, length, type,
				     cfile->netfid);
		if (!lck) {
			rc = -ENOMEM;
			goto send_locks;
		}
		lck = list_entry(el, struct lock_to_push, llist);
		lck->pid = flock->fl_pid;

		list_add_tail(&lck->llist, &locks_to_send);
		lck->netfid = cfile->netfid;
		lck->length = length;
		lck->type = type;
		lck->offset = flock->fl_start;
		i++;
		el = el->next;
	}

send_locks:
	unlock_flocks();

	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {

@@ -979,11 +1015,18 @@ send_locks:
		kfree(lck);
	}

out:
	cinode->can_cache_brlcks = false;
	mutex_unlock(&cinode->lock_mutex);

	FreeXid(xid);
	return rc;
err_out:
	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		list_del(&lck->llist);
		kfree(lck);
	}
	goto out;
}

static int

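The reworked cifs_push_posix_locks() above follows a common two-pass pattern: lock_flocks() takes a non-sleeping lock, so GFP_KERNEL allocation cannot happen inside it; instead the code counts the POSIX locks under the lock, allocates that many lock_to_push nodes outside it, then re-takes the lock and fills the preallocated nodes. A rough userspace sketch of the same idea (illustrative only; the node layout and data are invented, and a pthread spinlock stands in for lock_flocks()):

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct node {
		int val;
		struct node *next;
	};

	static pthread_spinlock_t list_lock;
	static int src[] = { 1, 2, 3, 4, 5 };	/* stands in for the flock list */

	int main(void)
	{
		struct node *head = NULL, *n;
		int count = 0, i;

		pthread_spin_init(&list_lock, PTHREAD_PROCESS_PRIVATE);

		/* pass 1: count entries while holding the non-sleeping lock */
		pthread_spin_lock(&list_lock);
		for (i = 0; i < (int)(sizeof(src) / sizeof(src[0])); i++)
			count++;
		pthread_spin_unlock(&list_lock);

		/* allocate outside the lock, where blocking or failing is fine */
		for (i = 0; i < count; i++) {
			n = malloc(sizeof(*n));
			if (!n)
				return 1;
			n->next = head;
			head = n;
		}

		/* pass 2: re-take the lock and fill the preallocated nodes */
		pthread_spin_lock(&list_lock);
		for (i = 0, n = head; i < count; i++, n = n->next)
			n->val = src[i];
		pthread_spin_unlock(&list_lock);

		for (n = head; n; n = n->next)
			printf("%d\n", n->val);
		while (head) {
			n = head->next;
			free(head);
			head = n;
		}
		return 0;
	}
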
@@ -105,7 +105,6 @@ int cifs_setxattr(struct dentry *direntry, const char *ea_name,
	struct cifs_tcon *pTcon;
	struct super_block *sb;
	char *full_path;
	struct cifs_ntsd *pacl;

	if (direntry == NULL)
		return -EIO;

@@ -164,23 +163,24 @@ int cifs_setxattr(struct dentry *direntry, const char *ea_name,
			cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
	} else if (strncmp(ea_name, CIFS_XATTR_CIFS_ACL,
			strlen(CIFS_XATTR_CIFS_ACL)) == 0) {
#ifdef CONFIG_CIFS_ACL
		struct cifs_ntsd *pacl;
		pacl = kmalloc(value_size, GFP_KERNEL);
		if (!pacl) {
			cFYI(1, "%s: Can't allocate memory for ACL",
					__func__);
			rc = -ENOMEM;
		} else {
#ifdef CONFIG_CIFS_ACL
			memcpy(pacl, ea_value, value_size);
			rc = set_cifs_acl(pacl, value_size,
				direntry->d_inode, full_path, CIFS_ACL_DACL);
			if (rc == 0) /* force revalidate of the inode */
				CIFS_I(direntry->d_inode)->time = 0;
			kfree(pacl);
		}
#else
			cFYI(1, "Set CIFS ACL not supported yet");
#endif /* CONFIG_CIFS_ACL */
		}
	} else {
		int temp;
		temp = strncmp(ea_name, POSIX_ACL_XATTR_ACCESS,

@@ -938,8 +938,7 @@ void lockdep_annotate_inode_mutex_key(struct inode *inode)
		struct file_system_type *type = inode->i_sb->s_type;

		/* Set new key only if filesystem hasn't already changed it */
		if (!lockdep_match_class(&inode->i_mutex,
		    &type->i_mutex_key)) {
		if (lockdep_match_class(&inode->i_mutex, &type->i_mutex_key)) {
			/*
			 * ensure nobody is actually holding i_mutex
			 */

@@ -966,6 +965,7 @@ void unlock_new_inode(struct inode *inode)
	spin_lock(&inode->i_lock);
	WARN_ON(!(inode->i_state & I_NEW));
	inode->i_state &= ~I_NEW;
	smp_mb();
	wake_up_bit(&inode->i_state, __I_NEW);
	spin_unlock(&inode->i_lock);
}

@@ -2162,7 +2162,7 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
		/* sayonara */
		error = complete_walk(nd);
		if (error)
			return ERR_PTR(-ECHILD);
			return ERR_PTR(error);

		error = -ENOTDIR;
		if (nd->flags & LOOKUP_DIRECTORY) {

@@ -2261,7 +2261,7 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
	/* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
	error = complete_walk(nd);
	if (error)
		goto exit;
		return ERR_PTR(error);
	error = -EISDIR;
	if (S_ISDIR(nd->inode->i_mode))
		goto exit;

@@ -201,12 +201,10 @@ out:
static int udf_release_file(struct inode *inode, struct file *filp)
{
	if (filp->f_mode & FMODE_WRITE) {
		mutex_lock(&inode->i_mutex);
		down_write(&UDF_I(inode)->i_data_sem);
		udf_discard_prealloc(inode);
		udf_truncate_tail_extent(inode);
		up_write(&UDF_I(inode)->i_data_sem);
		mutex_unlock(&inode->i_mutex);
	}
	return 0;
}

@@ -596,6 +596,7 @@ extern char *disk_name (struct gendisk *hd, int partno, char *buf);

extern int disk_expand_part_tbl(struct gendisk *disk, int target);
extern int rescan_partitions(struct gendisk *disk, struct block_device *bdev);
extern int invalidate_partitions(struct gendisk *disk, struct block_device *bdev);
extern struct hd_struct * __must_check add_partition(struct gendisk *disk,
						     int partno, sector_t start,
						     sector_t len, int flags,

@@ -6,8 +6,11 @@
#include <linux/workqueue.h>

enum {
	ICQ_IOPRIO_CHANGED,
	ICQ_CGROUP_CHANGED,
	ICQ_IOPRIO_CHANGED = 1 << 0,
	ICQ_CGROUP_CHANGED = 1 << 1,
	ICQ_EXITED = 1 << 2,

	ICQ_CHANGED_MASK = ICQ_IOPRIO_CHANGED | ICQ_CGROUP_CHANGED,
};

/*

@@ -88,7 +91,7 @@ struct io_cq {
		struct rcu_head __rcu_head;
	};

	unsigned long changed;
	unsigned int flags;
};

/*

@@ -139,6 +142,7 @@ struct io_context *get_task_io_context(struct task_struct *task,
				       gfp_t gfp_flags, int node);
void ioc_ioprio_changed(struct io_context *ioc, int ioprio);
void ioc_cgroup_changed(struct io_context *ioc);
unsigned int icq_get_changed(struct io_cq *icq);
#else
struct io_context;
static inline void put_io_context(struct io_context *ioc) { }

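The io_cq hunks above replace sequential enum values with explicit power-of-two masks in a plain flags word, plus a combined ICQ_CHANGED_MASK, so several conditions can be set, tested and cleared together. A small standalone C illustration of why mask-valued enums compose this way (the names below are local to the example, not the kernel's):

	#include <stdio.h>

	enum {
		IOPRIO_CHANGED = 1 << 0,
		CGROUP_CHANGED = 1 << 1,
		EXITED         = 1 << 2,
		CHANGED_MASK   = IOPRIO_CHANGED | CGROUP_CHANGED,
	};

	int main(void)
	{
		unsigned int flags = 0;

		flags |= IOPRIO_CHANGED;	/* record one event */
		flags |= CGROUP_CHANGED;	/* record another */

		if (flags & CHANGED_MASK)	/* test both with one mask */
			printf("change pending (flags=0x%x)\n", flags);

		flags &= ~CHANGED_MASK;		/* clear them together */
		printf("after clear: 0x%x\n", flags);
		return 0;
	}
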
@@ -289,12 +289,16 @@ enum {
 *
 * system_freezable_wq is equivalent to system_wq except that it's
 * freezable.
 *
 * system_nrt_freezable_wq is equivalent to system_nrt_wq except that
 * it's freezable.
 */
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_nrt_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_freezable_wq;
extern struct workqueue_struct *system_nrt_freezable_wq;

extern struct workqueue_struct *
__alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,

@@ -253,11 +253,13 @@ struct workqueue_struct *system_long_wq __read_mostly;
struct workqueue_struct *system_nrt_wq __read_mostly;
struct workqueue_struct *system_unbound_wq __read_mostly;
struct workqueue_struct *system_freezable_wq __read_mostly;
struct workqueue_struct *system_nrt_freezable_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_wq);
EXPORT_SYMBOL_GPL(system_long_wq);
EXPORT_SYMBOL_GPL(system_nrt_wq);
EXPORT_SYMBOL_GPL(system_unbound_wq);
EXPORT_SYMBOL_GPL(system_freezable_wq);
EXPORT_SYMBOL_GPL(system_nrt_freezable_wq);

#define CREATE_TRACE_POINTS
#include <trace/events/workqueue.h>

@@ -3833,8 +3835,11 @@ static int __init init_workqueues(void)
					    WQ_UNBOUND_MAX_ACTIVE);
	system_freezable_wq = alloc_workqueue("events_freezable",
					      WQ_FREEZABLE, 0);
	system_nrt_freezable_wq = alloc_workqueue("events_nrt_freezable",
					      WQ_NON_REENTRANT | WQ_FREEZABLE, 0);
	BUG_ON(!system_wq || !system_long_wq || !system_nrt_wq ||
	       !system_unbound_wq || !system_freezable_wq);
	       !system_unbound_wq || !system_freezable_wq ||
	       !system_nrt_freezable_wq);
	return 0;
}
early_initcall(init_workqueues);

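For context, the new queue is used like any other system workqueue. A hedged fragment of a possible caller (the work item and handler names are invented, and this assumes a kernel of this vintage where system_nrt_freezable_wq is exported as above):

	#include <linux/workqueue.h>

	static void my_handler(struct work_struct *work)
	{
		/* runs non-reentrantly and is frozen across suspend/resume */
	}
	static DECLARE_WORK(my_work, my_handler);

	static void kick_work(void)
	{
		queue_work(system_nrt_freezable_wq, &my_work);
	}
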
@@ -7,6 +7,7 @@
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/dynamic_queue_limits.h>

#define POSDIFF(A, B) ((A) > (B) ? (A) - (B) : 0)

@@ -278,6 +278,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
	struct rtable *rt;
	__u8 rcv_wscale;
	bool ecn_ok = false;
	struct flowi4 fl4;

	if (!sysctl_tcp_syncookies || !th->ack || th->rst)
		goto out;

@@ -346,20 +347,16 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
	 * hasn't changed since we received the original syn, but I see
	 * no easy way to do this.
	 */
	{
		struct flowi4 fl4;

		flowi4_init_output(&fl4, 0, sk->sk_mark, RT_CONN_FLAGS(sk),
				   RT_SCOPE_UNIVERSE, IPPROTO_TCP,
				   inet_sk_flowi_flags(sk),
				   (opt && opt->srr) ? opt->faddr : ireq->rmt_addr,
				   ireq->loc_addr, th->source, th->dest);
		security_req_classify_flow(req, flowi4_to_flowi(&fl4));
		rt = ip_route_output_key(sock_net(sk), &fl4);
		if (IS_ERR(rt)) {
			reqsk_free(req);
			goto out;
		}
	flowi4_init_output(&fl4, 0, sk->sk_mark, RT_CONN_FLAGS(sk),
			   RT_SCOPE_UNIVERSE, IPPROTO_TCP,
			   inet_sk_flowi_flags(sk),
			   (opt && opt->srr) ? opt->faddr : ireq->rmt_addr,
			   ireq->loc_addr, th->source, th->dest);
	security_req_classify_flow(req, flowi4_to_flowi(&fl4));
	rt = ip_route_output_key(sock_net(sk), &fl4);
	if (IS_ERR(rt)) {
		reqsk_free(req);
		goto out;
	}

	/* Try to redo what tcp_v4_send_synack did. */

@@ -373,5 +370,10 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
	ireq->rcv_wscale = rcv_wscale;

	ret = get_cookie_sock(sk, skb, req, &rt->dst);
	/* ip_queue_xmit() depends on our flow being setup
	 * Normal sockets get it right from inet_csk_route_child_sock()
	 */
	if (ret)
		inet_sk(ret)->cork.fl.u.ip4 = fl4;
out:	return ret;
}

@@ -1466,9 +1466,13 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
	newinet->inet_id = newtp->write_seq ^ jiffies;

	if (!dst && (dst = inet_csk_route_child_sock(sk, newsk, req)) == NULL)
		goto put_and_exit;

	if (!dst) {
		dst = inet_csk_route_child_sock(sk, newsk, req);
		if (!dst)
			goto put_and_exit;
	} else {
		/* syncookie case : see end of cookie_v4_check() */
	}
	sk_setup_caps(newsk, dst);

	tcp_mtup_init(newsk);

@@ -165,7 +165,7 @@ struct tracepoint_path *tracepoint_id_to_path(u64 config)
	struct tracepoint_path *path = NULL;
	DIR *sys_dir, *evt_dir;
	struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
	char id_buf[4];
	char id_buf[24];
	int fd;
	u64 id;
	char evt_path[MAXPATHLEN];
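The id_buf resize above reflects that a u64 printed in decimal can take up to 20 digits plus a terminating NUL, so 4 bytes truncates the tracepoint id. A standalone check (not perf code; the UINT64_MAX value is just a worst case):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t id = UINT64_MAX;	/* 18446744073709551615: 20 digits */
		char small[4], big[24];

		snprintf(small, sizeof(small), "%llu", (unsigned long long)id);
		snprintf(big, sizeof(big), "%llu", (unsigned long long)id);
		printf("4-byte buffer:  %s\n", small);	/* truncated to "184" */
		printf("24-byte buffer: %s\n", big);
		return 0;
	}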