Merge branch 'for-jens' of git://git.drbd.org/linux-2.6-drbd into for-linus
commit 575f552012
@@ -133,46 +133,6 @@ Description:
 The symbolic link points to the PCI device sysfs entry of the
 Physical Function this device associates with.
 
-
-What: /sys/bus/pci/slots/...
-Date: April 2005 (possibly older)
-KernelVersion: 2.6.12 (possibly older)
-Contact: linux-pci@vger.kernel.org
-Description:
-When the appropriate driver is loaded, it will create a
-directory per claimed physical PCI slot in
-/sys/bus/pci/slots/. The names of these directories are
-specific to the driver, which in turn, are specific to the
-platform, but in general, should match the label on the
-machine's physical chassis.
-
-The drivers that can create slot directories include the
-PCI hotplug drivers, and as of 2.6.27, the pci_slot driver.
-
-The slot directories contain, at a minimum, a file named
-'address' which contains the PCI bus:device:function tuple.
-Other files may appear as well, but are specific to the
-driver.
-
-What: /sys/bus/pci/slots/.../function[0-7]
-Date: March 2010
-KernelVersion: 2.6.35
-Contact: linux-pci@vger.kernel.org
-Description:
-If PCI slot directories (as described above) are created,
-and the physical slot is actually populated with a device,
-symbolic links in the slot directory pointing to the
-device's PCI functions are created as well.
-
-What: /sys/bus/pci/devices/.../slot
-Date: March 2010
-KernelVersion: 2.6.35
-Contact: linux-pci@vger.kernel.org
-Description:
-If PCI slot directories (as described above) are created,
-a symbolic link pointing to the slot directory will be
-created as well.
-
 What: /sys/bus/pci/slots/.../module
 Date: June 2009
 Contact: linux-pci@vger.kernel.org

Makefile

@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 35
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc3
 NAME = Sheep on Meth
 
 # *DOCUMENTATION*
@@ -1095,7 +1095,7 @@ all: modules
 # using awk while concatenating to the final file.
 
 PHONY += modules
-modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
+modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
 @$(kecho) ' Building modules, stage 2.';
 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
@@ -1117,7 +1117,7 @@ PHONY += modules_install
 modules_install: _modinst_ _modinst_post
 
 PHONY += _modinst_
-_modinst_: modules.builtin
+_modinst_:
 @if [ -z "`$(DEPMOD) -V 2>/dev/null | grep module-init-tools`" ]; then \
 echo "Warning: you may need to install module-init-tools"; \
 echo "See http://www.codemonkey.org.uk/docs/post-halloween-2.6.txt";\

@@ -1277,6 +1277,7 @@ void pcibios_allocate_bus_resources(struct pci_bus *bus)
 printk(KERN_WARNING "PCI: Cannot allocate resource region "
 "%d of PCI bridge %d, will remap\n", i, bus->number);
 clear_resource:
+res->start = res->end = 0;
 res->flags = 0;
 }

@@ -117,6 +117,7 @@ static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
 * Invalidate the resource to prevent
 * child resource allocations in this
 * range. */
+r->start = r->end = 0;
 r->flags = 0;
 }
 }

@@ -1309,6 +1309,7 @@ void pcibios_allocate_bus_resources(struct pci_bus *bus)
 printk(KERN_WARNING "PCI: Cannot allocate resource region "
 "%d of PCI bridge %d, will remap\n", i, bus->number);
 clear_resource:
+res->start = res->end = 0;
 res->flags = 0;
 }

@@ -15,6 +15,8 @@ static inline int arch_prepare_suspend(void) { return 0; }
 struct saved_context {
 u16 es, fs, gs, ss;
 unsigned long cr0, cr2, cr3, cr4;
+u64 misc_enable;
+bool misc_enable_saved;
 struct desc_ptr gdt;
 struct desc_ptr idt;
 u16 ldt;

@@ -27,6 +27,8 @@ struct saved_context {
 u16 ds, es, fs, gs, ss;
 unsigned long gs_base, gs_kernel_base, fs_base;
 unsigned long cr0, cr2, cr3, cr4, cr8;
+u64 misc_enable;
+bool misc_enable_saved;
 unsigned long efer;
 u16 gdt_pad;
 u16 gdt_limit;

@@ -96,6 +96,7 @@ EXPORT_SYMBOL(pcibios_align_resource);
 * the fact the PCI specs explicitly allow address decoders to be
 * shared between expansion ROMs and other resource regions, it's
 * at least dangerous)
+* - bad resource sizes or overlaps with other regions
 *
 * Our solution:
 * (1) Allocate resources for all buses behind PCI-to-PCI bridges.
@@ -136,6 +137,7 @@ static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
 * child resource allocations in this
 * range.
 */
+r->start = r->end = 0;
 r->flags = 0;
 }
 }

@@ -105,6 +105,8 @@ static void __save_processor_state(struct saved_context *ctxt)
 ctxt->cr4 = read_cr4();
 ctxt->cr8 = read_cr8();
 #endif
+ctxt->misc_enable_saved = !rdmsrl_safe(MSR_IA32_MISC_ENABLE,
+&ctxt->misc_enable);
 }
 
 /* Needed by apm.c */
@@ -152,6 +154,8 @@ static void fix_processor_context(void)
 */
 static void __restore_processor_state(struct saved_context *ctxt)
 {
+if (ctxt->misc_enable_saved)
+wrmsrl(MSR_IA32_MISC_ENABLE, ctxt->misc_enable);
 /*
 * control registers
 */

@@ -622,6 +622,11 @@ static int sil24_exec_polled_cmd(struct ata_port *ap, int pmp,
 irq_enabled = readl(port + PORT_IRQ_ENABLE_SET);
 writel(PORT_IRQ_COMPLETE | PORT_IRQ_ERROR, port + PORT_IRQ_ENABLE_CLR);
 
+/*
+* The barrier is required to ensure that writes to cmd_block reach
+* the memory before the write to PORT_CMD_ACTIVATE.
+*/
+wmb();
 writel((u32)paddr, port + PORT_CMD_ACTIVATE);
 writel((u64)paddr >> 32, port + PORT_CMD_ACTIVATE + 4);
 
@@ -865,7 +870,7 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc)
 } else {
 prb = &cb->atapi.prb;
 sge = cb->atapi.sge;
-memset(cb->atapi.cdb, 0, 32);
+memset(cb->atapi.cdb, 0, sizeof(cb->atapi.cdb));
 memcpy(cb->atapi.cdb, qc->cdb, qc->dev->cdb_len);
 
 if (ata_is_data(qc->tf.protocol)) {
@@ -895,6 +900,11 @@ static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc)
 paddr = pp->cmd_block_dma + tag * sizeof(*pp->cmd_block);
 activate = port + PORT_CMD_ACTIVATE + tag * 8;
 
+/*
+* The barrier is required to ensure that writes to cmd_block reach
+* the memory before the write to PORT_CMD_ACTIVATE.
+*/
+wmb();
 writel((u32)paddr, activate);
 writel((u64)paddr >> 32, activate + 4);

@@ -1236,8 +1236,6 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
 /* Last part of the attaching process ... */
 if (ns.conn >= C_CONNECTED &&
 os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) {
-kfree(mdev->p_uuid); /* We expect to receive up-to-date UUIDs soon. */
-mdev->p_uuid = NULL; /* ...to not use the old ones in the mean time */
 drbd_send_sizes(mdev, 0, 0); /* to start sync... */
 drbd_send_uuids(mdev);
 drbd_send_state(mdev);

@@ -1114,6 +1114,12 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
 mdev->new_state_tmp.i = ns.i;
 ns.i = os.i;
 ns.disk = D_NEGOTIATING;
+
+/* We expect to receive up-to-date UUIDs soon.
+To avoid a race in receive_state, free p_uuid while
+holding req_lock. I.e. atomic with the state change */
+kfree(mdev->p_uuid);
+mdev->p_uuid = NULL;
 }
 
 rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);

@@ -598,8 +598,8 @@ rx_next:
 goto rx_status_loop;
 
 spin_lock_irqsave(&cp->lock, flags);
-cpw16_f(IntrMask, cp_intr_mask);
 __napi_complete(napi);
+cpw16_f(IntrMask, cp_intr_mask);
 spin_unlock_irqrestore(&cp->lock, flags);
 }

@@ -2089,8 +2089,8 @@ static int rtl8139_poll(struct napi_struct *napi, int budget)
 * again when we think we are done.
 */
 spin_lock_irqsave(&tp->lock, flags);
-RTL_W16_F(IntrMask, rtl8139_intr_mask);
 __napi_complete(napi);
+RTL_W16_F(IntrMask, rtl8139_intr_mask);
 spin_unlock_irqrestore(&tp->lock, flags);
 }
 spin_unlock(&tp->rx_lock);

@@ -747,8 +747,7 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
 FSL_GIANFAR_DEV_HAS_CSUM |
 FSL_GIANFAR_DEV_HAS_VLAN |
 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
-FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
-FSL_GIANFAR_DEV_HAS_TIMER;
+FSL_GIANFAR_DEV_HAS_EXTENDED_HASH;
 
 ctype = of_get_property(np, "phy-connection-type", NULL);

@@ -322,6 +322,7 @@ static int smc91c92_probe(struct pcmcia_device *link)
 return -ENOMEM;
 smc = netdev_priv(dev);
 smc->p_dev = link;
+link->priv = dev;
 
 spin_lock_init(&smc->lock);
 link->io.NumPorts1 = 16;

@@ -53,6 +53,9 @@
 
 #define MII_LXT971_ISR 19 /* Interrupt Status Register */
 
+/* register definitions for the 973 */
+#define MII_LXT973_PCR 16 /* Port Configuration Register */
+#define PCR_FIBER_SELECT 1
 
 MODULE_DESCRIPTION("Intel LXT PHY driver");
 MODULE_AUTHOR("Andy Fleming");
@@ -119,6 +122,33 @@ static int lxt971_config_intr(struct phy_device *phydev)
 return err;
 }
 
+static int lxt973_probe(struct phy_device *phydev)
+{
+int val = phy_read(phydev, MII_LXT973_PCR);
+
+if (val & PCR_FIBER_SELECT) {
+/*
+* If fiber is selected, then the only correct setting
+* is 100Mbps, full duplex, and auto negotiation off.
+*/
+val = phy_read(phydev, MII_BMCR);
+val |= (BMCR_SPEED100 | BMCR_FULLDPLX);
+val &= ~BMCR_ANENABLE;
+phy_write(phydev, MII_BMCR, val);
+/* Remember that the port is in fiber mode. */
+phydev->priv = lxt973_probe;
+} else {
+phydev->priv = NULL;
+}
+return 0;
+}
+
+static int lxt973_config_aneg(struct phy_device *phydev)
+{
+/* Do nothing if port is in fiber mode. */
+return phydev->priv ? 0 : genphy_config_aneg(phydev);
+}
+
 static struct phy_driver lxt970_driver = {
 .phy_id = 0x78100000,
 .name = "LXT970",
@@ -146,6 +176,18 @@ static struct phy_driver lxt971_driver = {
 .driver = { .owner = THIS_MODULE,},
 };
 
+static struct phy_driver lxt973_driver = {
+.phy_id = 0x00137a10,
+.name = "LXT973",
+.phy_id_mask = 0xfffffff0,
+.features = PHY_BASIC_FEATURES,
+.flags = 0,
+.probe = lxt973_probe,
+.config_aneg = lxt973_config_aneg,
+.read_status = genphy_read_status,
+.driver = { .owner = THIS_MODULE,},
+};
+
 static int __init lxt_init(void)
 {
 int ret;
@@ -157,8 +199,14 @@ static int __init lxt_init(void)
 ret = phy_driver_register(&lxt971_driver);
 if (ret)
 goto err2;
+
+ret = phy_driver_register(&lxt973_driver);
+if (ret)
+goto err3;
 return 0;
 
+err3:
+phy_driver_unregister(&lxt971_driver);
 err2:
 phy_driver_unregister(&lxt970_driver);
 err1:
@@ -169,6 +217,7 @@ static void __exit lxt_exit(void)
 {
 phy_driver_unregister(&lxt970_driver);
 phy_driver_unregister(&lxt971_driver);
+phy_driver_unregister(&lxt973_driver);
 }
 
 module_init(lxt_init);

@@ -560,10 +560,10 @@ static void mdio_write(void __iomem *ioaddr, int reg_addr, int value)
 udelay(25);
 }
 /*
-* Some configurations require a small delay even after the write
-* completed indication or the next write might fail.
+* According to hardware specs a 20us delay is required after write
+* complete indication, but before sending next command.
 */
-udelay(25);
+udelay(20);
 }
 
 static int mdio_read(void __iomem *ioaddr, int reg_addr)
@@ -583,6 +583,12 @@ static int mdio_read(void __iomem *ioaddr, int reg_addr)
 }
 udelay(25);
 }
+/*
+* According to hardware specs a 20us delay is required after read
+* complete indication, but before sending next command.
+*/
+udelay(20);
 
 return value;
 }

@@ -1192,7 +1192,7 @@ int i2400m_fw_hdr_check(struct i2400m *i2400m,
 unsigned module_type, header_len, major_version, minor_version,
 module_id, module_vendor, date, size;
 
-module_type = bcf_hdr->module_type;
+module_type = le32_to_cpu(bcf_hdr->module_type);
 header_len = sizeof(u32) * le32_to_cpu(bcf_hdr->header_len);
 major_version = (le32_to_cpu(bcf_hdr->header_version) & 0xffff0000)
 >> 16;

@@ -828,7 +828,14 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 pci_name(pdev), err);
 return err;
 }
 
 bus = pdev->subordinate;
+if (!bus) {
+dev_notice(&pdev->dev, "the device is not a bridge, "
+"skipping\n");
+rc = -ENODEV;
+goto err_disable_device;
+}
 
 /* Need to read VID early b/c it's used to differentiate CPQ and INTC
 * discovery

@@ -1035,39 +1035,6 @@ error:
 return retval;
 }
 
-static void pci_remove_slot_links(struct pci_dev *dev)
-{
-char func[10];
-struct pci_slot *slot;
-
-sysfs_remove_link(&dev->dev.kobj, "slot");
-list_for_each_entry(slot, &dev->bus->slots, list) {
-if (slot->number != PCI_SLOT(dev->devfn))
-continue;
-snprintf(func, 10, "function%d", PCI_FUNC(dev->devfn));
-sysfs_remove_link(&slot->kobj, func);
-}
-}
-
-static int pci_create_slot_links(struct pci_dev *dev)
-{
-int result = 0;
-char func[10];
-struct pci_slot *slot;
-
-list_for_each_entry(slot, &dev->bus->slots, list) {
-if (slot->number != PCI_SLOT(dev->devfn))
-continue;
-result = sysfs_create_link(&dev->dev.kobj, &slot->kobj, "slot");
-if (result)
-goto out;
-snprintf(func, 10, "function%d", PCI_FUNC(dev->devfn));
-result = sysfs_create_link(&slot->kobj, &dev->dev.kobj, func);
-}
-out:
-return result;
-}
-
 int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev)
 {
 int retval;
@@ -1130,8 +1097,6 @@ int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev)
 if (retval)
 goto err_vga_file;
 
-pci_create_slot_links(pdev);
-
 return 0;
 
 err_vga_file:
@@ -1181,8 +1146,6 @@ void pci_remove_sysfs_dev_files(struct pci_dev *pdev)
 if (!sysfs_initialized)
 return;
 
-pci_remove_slot_links(pdev);
-
 pci_remove_capabilities_sysfs(pdev);
 
 if (pdev->cfg_size < PCI_CFG_SPACE_EXP_SIZE)

@@ -97,14 +97,14 @@ int pci_claim_resource(struct pci_dev *dev, int resource)
 
 root = pci_find_parent_resource(dev, res);
 if (!root) {
-dev_err(&dev->dev, "no compatible bridge window for %pR\n",
+dev_info(&dev->dev, "no compatible bridge window for %pR\n",
 res);
 return -EINVAL;
 }
 
 conflict = request_resource_conflict(root, res);
 if (conflict) {
-dev_err(&dev->dev,
+dev_info(&dev->dev,
 "address space collision: %pR conflicts with %s %pR\n",
 res, conflict->name, conflict);
 return -EBUSY;

@@ -97,50 +97,6 @@ static ssize_t cur_speed_read_file(struct pci_slot *slot, char *buf)
 return bus_speed_read(slot->bus->cur_bus_speed, buf);
 }
 
-static void remove_sysfs_files(struct pci_slot *slot)
-{
-char func[10];
-struct list_head *tmp;
-
-list_for_each(tmp, &slot->bus->devices) {
-struct pci_dev *dev = pci_dev_b(tmp);
-if (PCI_SLOT(dev->devfn) != slot->number)
-continue;
-sysfs_remove_link(&dev->dev.kobj, "slot");
-
-snprintf(func, 10, "function%d", PCI_FUNC(dev->devfn));
-sysfs_remove_link(&slot->kobj, func);
-}
-}
-
-static int create_sysfs_files(struct pci_slot *slot)
-{
-int result;
-char func[10];
-struct list_head *tmp;
-
-list_for_each(tmp, &slot->bus->devices) {
-struct pci_dev *dev = pci_dev_b(tmp);
-if (PCI_SLOT(dev->devfn) != slot->number)
-continue;
-
-result = sysfs_create_link(&dev->dev.kobj, &slot->kobj, "slot");
-if (result)
-goto fail;
-
-snprintf(func, 10, "function%d", PCI_FUNC(dev->devfn));
-result = sysfs_create_link(&slot->kobj, &dev->dev.kobj, func);
-if (result)
-goto fail;
-}
-
-return 0;
-
-fail:
-remove_sysfs_files(slot);
-return result;
-}
-
 static void pci_slot_release(struct kobject *kobj)
 {
 struct pci_dev *dev;
@@ -153,8 +109,6 @@ static void pci_slot_release(struct kobject *kobj)
 if (PCI_SLOT(dev->devfn) == slot->number)
 dev->slot = NULL;
 
-remove_sysfs_files(slot);
-
 list_del(&slot->list);
 
 kfree(slot);
@@ -346,8 +300,6 @@ placeholder:
 INIT_LIST_HEAD(&slot->list);
 list_add(&slot->list, &parent->slots);
 
-create_sysfs_files(slot);
-
 list_for_each_entry(dev, &parent->devices, bus_list)
 if (PCI_SLOT(dev->devfn) == slot_nr)
 dev->slot = slot;

@@ -671,6 +671,7 @@ static void pcmcia_requery(struct pcmcia_socket *s)
 if (old_funcs != new_funcs) {
 /* we need to re-start */
 pcmcia_card_remove(s, NULL);
+s->functions = 0;
 pcmcia_card_add(s);
 }
 }

@@ -880,6 +880,12 @@ static struct cardbus_type cardbus_type[] = {
 .restore_state = ti_restore_state,
 .sock_init = ti_init,
 },
+[CARDBUS_TYPE_ENE] = {
+.override = ene_override,
+.save_state = ti_save_state,
+.restore_state = ti_restore_state,
+.sock_init = ti_init,
+},
 #endif
 #ifdef CONFIG_YENTA_RICOH
 [CARDBUS_TYPE_RICOH] = {
@@ -902,14 +908,6 @@ static struct cardbus_type cardbus_type[] = {
 .restore_state = o2micro_restore_state,
 },
 #endif
-#ifdef CONFIG_YENTA_TI
-[CARDBUS_TYPE_ENE] = {
-.override = ene_override,
-.save_state = ti_save_state,
-.restore_state = ti_restore_state,
-.sock_init = ti_init,
-},
-#endif
 };
 
 
@@ -975,7 +973,7 @@ static irqreturn_t yenta_probe_handler(int irq, void *dev_id)
 /* probes the PCI interrupt, use only on override functions */
 static int yenta_probe_cb_irq(struct yenta_socket *socket)
 {
-u8 reg;
+u8 reg = 0;
 
 if (!socket->cb_irq)
 return -1;
@@ -989,6 +987,7 @@ static int yenta_probe_cb_irq(struct yenta_socket *socket)
 }
 
 /* generate interrupt, wait */
+if (!socket->dev->irq)
 reg = exca_readb(socket, I365_CSCINT);
 exca_writeb(socket, I365_CSCINT, reg | I365_CSC_STSCHG);
 cb_writel(socket, CB_SOCKET_EVENT, -1);

@@ -60,6 +60,8 @@ static struct posix_acl *btrfs_get_acl(struct inode *inode, int type)
 size = __btrfs_getxattr(inode, name, value, size);
 if (size > 0) {
 acl = posix_acl_from_xattr(value, size);
+if (IS_ERR(acl))
+return acl;
 set_cached_acl(inode, type, acl);
 }
 kfree(value);
@@ -160,6 +162,12 @@ static int btrfs_xattr_acl_set(struct dentry *dentry, const char *name,
 int ret;
 struct posix_acl *acl = NULL;
 
+if (!is_owner_or_cap(dentry->d_inode))
+return -EPERM;
+
+if (!IS_POSIXACL(dentry->d_inode))
+return -EOPNOTSUPP;
+
 if (value) {
 acl = posix_acl_from_xattr(value, size);
 if (acl == NULL) {

@@ -1941,8 +1941,11 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 btrfs_level_size(tree_root,
 btrfs_super_log_root_level(disk_super));
 
-log_tree_root = kzalloc(sizeof(struct btrfs_root),
-GFP_NOFS);
+log_tree_root = kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
+if (!log_tree_root) {
+err = -ENOMEM;
+goto fail_trans_kthread;
+}
 
 __setup_root(nodesize, leafsize, sectorsize, stripesize,
 log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID);
@@ -1982,6 +1985,10 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location);
 if (!fs_info->fs_root)
 goto fail_trans_kthread;
+if (IS_ERR(fs_info->fs_root)) {
+err = PTR_ERR(fs_info->fs_root);
+goto fail_trans_kthread;
+}
 
 if (!(sb->s_flags & MS_RDONLY)) {
 down_read(&fs_info->cleanup_work_sem);

@@ -4360,7 +4360,8 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
 
 block_rsv = get_block_rsv(trans, root);
 cache = btrfs_lookup_block_group(root->fs_info, buf->start);
-BUG_ON(block_rsv->space_info != cache->space_info);
+if (block_rsv->space_info != cache->space_info)
+goto out;
 
 if (btrfs_header_generation(buf) == trans->transid) {
 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {

@@ -1140,7 +1140,7 @@ int btrfs_sync_file(struct file *file, int datasync)
 /*
 * ok we haven't committed the transaction yet, lets do a commit
 */
-if (file && file->private_data)
+if (file->private_data)
 btrfs_ioctl_trans_end(file);
 
 trans = btrfs_start_transaction(root, 0);
@@ -1190,14 +1190,22 @@ static const struct vm_operations_struct btrfs_file_vm_ops = {
 
 static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
 {
-vma->vm_ops = &btrfs_file_vm_ops;
+struct address_space *mapping = filp->f_mapping;
+
+if (!mapping->a_ops->readpage)
+return -ENOEXEC;
+
 file_accessed(filp);
+vma->vm_ops = &btrfs_file_vm_ops;
+vma->vm_flags |= VM_CAN_NONLINEAR;
+
 return 0;
 }
 
 const struct file_operations btrfs_file_operations = {
 .llseek = generic_file_llseek,
 .read = do_sync_read,
+.write = do_sync_write,
 .aio_read = generic_file_aio_read,
 .splice_read = generic_file_splice_read,
 .aio_write = btrfs_file_aio_write,

@@ -2673,7 +2673,7 @@ static int check_path_shared(struct btrfs_root *root,
 struct extent_buffer *eb;
 int level;
 int ret;
-u64 refs;
+u64 refs = 1;
 
 for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
 if (!path->nodes[level])
@@ -6884,7 +6884,7 @@ static long btrfs_fallocate(struct inode *inode, int mode,
 if (em->block_start == EXTENT_MAP_HOLE ||
 (cur_offset >= inode->i_size &&
 !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
-ret = btrfs_prealloc_file_range(inode, 0, cur_offset,
+ret = btrfs_prealloc_file_range(inode, mode, cur_offset,
 last_byte - cur_offset,
 1 << inode->i_blkbits,
 offset + len,

@@ -1280,7 +1280,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
 trans = btrfs_start_transaction(root, 0);
 if (IS_ERR(trans)) {
 err = PTR_ERR(trans);
-goto out;
+goto out_up_write;
 }
 trans->block_rsv = &root->fs_info->global_block_rsv;
 
@@ -1845,7 +1845,7 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
 dir_id = btrfs_super_root_dir(&root->fs_info->super_copy);
 di = btrfs_lookup_dir_item(trans, root->fs_info->tree_root, path,
 dir_id, "default", 7, 1);
-if (!di) {
+if (IS_ERR_OR_NULL(di)) {
 btrfs_free_path(path);
 btrfs_end_transaction(trans, root);
 printk(KERN_ERR "Umm, you don't have the default dir item, "

@@ -784,16 +784,17 @@ again:
 struct btrfs_extent_ref_v0 *ref0;
 ref0 = btrfs_item_ptr(eb, path1->slots[0],
 struct btrfs_extent_ref_v0);
-root = find_tree_root(rc, eb, ref0);
-if (!root->ref_cows)
-cur->cowonly = 1;
 if (key.objectid == key.offset) {
+root = find_tree_root(rc, eb, ref0);
 if (root && !should_ignore_root(root))
 cur->root = root;
 else
 list_add(&cur->list, &useless);
 break;
 }
+if (is_cowonly_root(btrfs_ref_root_v0(eb,
+ref0)))
+cur->cowonly = 1;
 }
 #else
 BUG_ON(key.type == BTRFS_EXTENT_REF_V0_KEY);

@@ -330,7 +330,6 @@ int btrfs_del_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 {
 struct btrfs_path *path;
 int ret;
-u32 refs;
 struct btrfs_root_item *ri;
 struct extent_buffer *leaf;
 
@@ -344,8 +343,6 @@ int btrfs_del_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 leaf = path->nodes[0];
 ri = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_item);
 
-refs = btrfs_disk_root_refs(leaf, ri);
-BUG_ON(refs != 0);
 ret = btrfs_del_item(trans, root, path);
 out:
 btrfs_free_path(path);

@@ -360,6 +360,8 @@ static struct dentry *get_default_root(struct super_block *sb,
 */
 dir_id = btrfs_super_root_dir(&root->fs_info->super_copy);
 di = btrfs_lookup_dir_item(NULL, root, path, dir_id, "default", 7, 0);
+if (IS_ERR(di))
+return ERR_CAST(di);
 if (!di) {
 /*
 * Ok the default dir item isn't there. This is weird since
@@ -390,8 +392,8 @@ setup_root:
 location.offset = 0;
 
 inode = btrfs_iget(sb, &location, new_root, &new);
-if (!inode)
-return ERR_PTR(-ENOMEM);
+if (IS_ERR(inode))
+return ERR_CAST(inode);
 
 /*
 * If we're just mounting the root most subvol put the inode and return

@@ -981,6 +981,46 @@ static int send_cap_msg(struct ceph_mds_session *session,
 return 0;
 }
 
+static void __queue_cap_release(struct ceph_mds_session *session,
+u64 ino, u64 cap_id, u32 migrate_seq,
+u32 issue_seq)
+{
+struct ceph_msg *msg;
+struct ceph_mds_cap_release *head;
+struct ceph_mds_cap_item *item;
+
+spin_lock(&session->s_cap_lock);
+BUG_ON(!session->s_num_cap_releases);
+msg = list_first_entry(&session->s_cap_releases,
+struct ceph_msg, list_head);
+
+dout(" adding %llx release to mds%d msg %p (%d left)\n",
+ino, session->s_mds, msg, session->s_num_cap_releases);
+
+BUG_ON(msg->front.iov_len + sizeof(*item) > PAGE_CACHE_SIZE);
+head = msg->front.iov_base;
+head->num = cpu_to_le32(le32_to_cpu(head->num) + 1);
+item = msg->front.iov_base + msg->front.iov_len;
+item->ino = cpu_to_le64(ino);
+item->cap_id = cpu_to_le64(cap_id);
+item->migrate_seq = cpu_to_le32(migrate_seq);
+item->seq = cpu_to_le32(issue_seq);
+
+session->s_num_cap_releases--;
+
+msg->front.iov_len += sizeof(*item);
+if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) {
+dout(" release msg %p full\n", msg);
+list_move_tail(&msg->list_head, &session->s_cap_releases_done);
+} else {
+dout(" release msg %p at %d/%d (%d)\n", msg,
+(int)le32_to_cpu(head->num),
+(int)CEPH_CAPS_PER_RELEASE,
+(int)msg->front.iov_len);
+}
+spin_unlock(&session->s_cap_lock);
+}
+
 /*
 * Queue cap releases when an inode is dropped from our cache. Since
 * inode is about to be destroyed, there is no need for i_lock.
@@ -994,41 +1034,9 @@ void ceph_queue_caps_release(struct inode *inode)
 while (p) {
 struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);
 struct ceph_mds_session *session = cap->session;
-struct ceph_msg *msg;
-struct ceph_mds_cap_release *head;
-struct ceph_mds_cap_item *item;
 
-spin_lock(&session->s_cap_lock);
-BUG_ON(!session->s_num_cap_releases);
-msg = list_first_entry(&session->s_cap_releases,
-struct ceph_msg, list_head);
-
-dout(" adding %p release to mds%d msg %p (%d left)\n",
-inode, session->s_mds, msg, session->s_num_cap_releases);
-
-BUG_ON(msg->front.iov_len + sizeof(*item) > PAGE_CACHE_SIZE);
-head = msg->front.iov_base;
-head->num = cpu_to_le32(le32_to_cpu(head->num) + 1);
-item = msg->front.iov_base + msg->front.iov_len;
-item->ino = cpu_to_le64(ceph_ino(inode));
-item->cap_id = cpu_to_le64(cap->cap_id);
-item->migrate_seq = cpu_to_le32(cap->mseq);
-item->seq = cpu_to_le32(cap->issue_seq);
-
-session->s_num_cap_releases--;
-
-msg->front.iov_len += sizeof(*item);
-if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) {
-dout(" release msg %p full\n", msg);
-list_move_tail(&msg->list_head,
-&session->s_cap_releases_done);
-} else {
-dout(" release msg %p at %d/%d (%d)\n", msg,
-(int)le32_to_cpu(head->num),
-(int)CEPH_CAPS_PER_RELEASE,
-(int)msg->front.iov_len);
-}
-spin_unlock(&session->s_cap_lock);
+__queue_cap_release(session, ceph_ino(inode), cap->cap_id,
+cap->mseq, cap->issue_seq);
 p = rb_next(p);
 __ceph_remove_cap(cap);
 }
@@ -2655,7 +2663,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
 struct ceph_mds_caps *h;
 int mds = session->s_mds;
 int op;
-u32 seq;
+u32 seq, mseq;
 struct ceph_vino vino;
 u64 cap_id;
 u64 size, max_size;
@@ -2675,6 +2683,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
 vino.snap = CEPH_NOSNAP;
 cap_id = le64_to_cpu(h->cap_id);
 seq = le32_to_cpu(h->seq);
+mseq = le32_to_cpu(h->migrate_seq);
 size = le64_to_cpu(h->size);
 max_size = le64_to_cpu(h->max_size);
 
@@ -2689,6 +2698,18 @@ void ceph_handle_caps(struct ceph_mds_session *session,
 vino.snap, inode);
 if (!inode) {
 dout(" i don't have ino %llx\n", vino.ino);
+
+if (op == CEPH_CAP_OP_IMPORT)
+__queue_cap_release(session, vino.ino, cap_id,
+mseq, seq);
+
+/*
+* send any full release message to try to move things
+* along for the mds (who clearly thinks we still have this
+* cap).
+*/
+ceph_add_cap_releases(mdsc, session, -1);
+ceph_send_cap_releases(mdsc, session);
 goto done;
 }
 
@@ -2714,7 +2735,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
 spin_lock(&inode->i_lock);
 cap = __get_cap_for_mds(ceph_inode(inode), mds);
 if (!cap) {
-dout("no cap on %p ino %llx.%llx from mds%d, releasing\n",
+dout(" no cap on %p ino %llx.%llx from mds%d\n",
 inode, ceph_ino(inode), ceph_snap(inode), mds);
 spin_unlock(&inode->i_lock);
 goto done;

@@ -827,7 +827,7 @@ static void ceph_set_dentry_offset(struct dentry *dn)
 
 spin_lock(&dcache_lock);
 spin_lock(&dn->d_lock);
-list_move_tail(&dir->d_subdirs, &dn->d_u.d_child);
+list_move(&dn->d_u.d_child, &dir->d_subdirs);
 dout("set_dentry_offset %p %lld (%p %p)\n", dn, di->offset,
 dn->d_u.d_child.prev, dn->d_u.d_child.next);
 spin_unlock(&dn->d_lock);

@@ -1066,7 +1066,7 @@ static int trim_caps(struct ceph_mds_client *mdsc,
 *
 * Called under s_mutex.
 */
-static int add_cap_releases(struct ceph_mds_client *mdsc,
+int ceph_add_cap_releases(struct ceph_mds_client *mdsc,
 struct ceph_mds_session *session,
 int extra)
 {
@@ -1176,7 +1176,7 @@ static int check_cap_flush(struct ceph_mds_client *mdsc, u64 want_flush_seq)
 /*
 * called under s_mutex
 */
-static void send_cap_releases(struct ceph_mds_client *mdsc,
+void ceph_send_cap_releases(struct ceph_mds_client *mdsc,
 struct ceph_mds_session *session)
 {
 struct ceph_msg *msg;
@@ -1980,7 +1980,7 @@ out_err:
 }
 mutex_unlock(&mdsc->mutex);
 
-add_cap_releases(mdsc, req->r_session, -1);
+ceph_add_cap_releases(mdsc, req->r_session, -1);
 mutex_unlock(&session->s_mutex);
 
 /* kick calling process */
@@ -2433,6 +2433,7 @@ static void handle_lease(struct ceph_mds_client *mdsc,
 struct ceph_dentry_info *di;
 int mds = session->s_mds;
 struct ceph_mds_lease *h = msg->front.iov_base;
+u32 seq;
 struct ceph_vino vino;
 int mask;
 struct qstr dname;
@@ -2446,6 +2447,7 @@ static void handle_lease(struct ceph_mds_client *mdsc,
 vino.ino = le64_to_cpu(h->ino);
 vino.snap = CEPH_NOSNAP;
 mask = le16_to_cpu(h->mask);
+seq = le32_to_cpu(h->seq);
 dname.name = (void *)h + sizeof(*h) + sizeof(u32);
 dname.len = msg->front.iov_len - sizeof(*h) - sizeof(u32);
 if (dname.len != get_unaligned_le32(h+1))
@@ -2456,8 +2458,9 @@ static void handle_lease(struct ceph_mds_client *mdsc,
 
 /* lookup inode */
 inode = ceph_find_inode(sb, vino);
-dout("handle_lease '%s', mask %d, ino %llx %p\n",
-ceph_lease_op_name(h->action), mask, vino.ino, inode);
+dout("handle_lease %s, mask %d, ino %llx %p %.*s\n",
+ceph_lease_op_name(h->action), mask, vino.ino, inode,
+dname.len, dname.name);
 if (inode == NULL) {
 dout("handle_lease no inode %llx\n", vino.ino);
 goto release;
@@ -2482,6 +2485,7 @@ static void handle_lease(struct ceph_mds_client *mdsc,
 switch (h->action) {
 case CEPH_MDS_LEASE_REVOKE:
 if (di && di->lease_session == session) {
+if (ceph_seq_cmp(di->lease_seq, seq) > 0)
 h->seq = cpu_to_le32(di->lease_seq);
 __ceph_mdsc_drop_dentry_lease(dentry);
 }
@@ -2496,7 +2500,7 @@ static void handle_lease(struct ceph_mds_client *mdsc,
 unsigned long duration =
 le32_to_cpu(h->duration_ms) * HZ / 1000;
 
-di->lease_seq = le32_to_cpu(h->seq);
+di->lease_seq = seq;
 dentry->d_time = di->lease_renew_from + duration;
 di->lease_renew_after = di->lease_renew_from +
 (duration >> 1);
@@ -2686,10 +2690,10 @@ static void delayed_work(struct work_struct *work)
 send_renew_caps(mdsc, s);
 else
 ceph_con_keepalive(&s->s_con);
-add_cap_releases(mdsc, s, -1);
+ceph_add_cap_releases(mdsc, s, -1);
 if (s->s_state == CEPH_MDS_SESSION_OPEN ||
 s->s_state == CEPH_MDS_SESSION_HUNG)
-send_cap_releases(mdsc, s);
+ceph_send_cap_releases(mdsc, s);
 mutex_unlock(&s->s_mutex);
 ceph_put_mds_session(s);

@@ -322,6 +322,12 @@ static inline void ceph_mdsc_put_request(struct ceph_mds_request *req)
 kref_put(&req->r_kref, ceph_mdsc_release_request);
 }
 
+extern int ceph_add_cap_releases(struct ceph_mds_client *mdsc,
+struct ceph_mds_session *session,
+int extra);
+extern void ceph_send_cap_releases(struct ceph_mds_client *mdsc,
+struct ceph_mds_session *session);
+
 extern void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc);
 
 extern char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base,

@@ -400,6 +400,8 @@ static void release_generic_request(struct kref *kref)
 ceph_msg_put(req->reply);
 if (req->request)
 ceph_msg_put(req->request);
+
+kfree(req);
 }
 
 static void put_generic_request(struct ceph_mon_generic_request *req)

@@ -89,7 +89,7 @@ static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
 
 buf->f_files = le64_to_cpu(st.num_objects);
 buf->f_ffree = -1;
-buf->f_namelen = PATH_MAX;
+buf->f_namelen = NAME_MAX;
 buf->f_frsize = PAGE_CACHE_SIZE;
 
 /* leave fsid little-endian, regardless of host endianness */
@@ -926,7 +926,7 @@ static int ceph_compare_super(struct super_block *sb, void *data)
 /*
 * construct our own bdi so we can control readahead, etc.
 */
-static atomic_long_t bdi_seq = ATOMIC_INIT(0);
+static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
 
 static int ceph_register_bdi(struct super_block *sb, struct ceph_client *client)
 {

@@ -53,7 +53,7 @@
 
 
 extern const char *drbd_buildtag(void);
-#define REL_VERSION "8.3.8rc2"
+#define REL_VERSION "8.3.8"
 #define API_VERSION 88
 #define PRO_VERSION_MIN 86
 #define PRO_VERSION_MAX 94

@@ -380,7 +380,10 @@ struct sk_buff {
 kmemcheck_bitfield_begin(flags2);
 __u16 queue_mapping:16;
 #ifdef CONFIG_IPV6_NDISC_NODETYPE
-__u8 ndisc_nodetype:2;
+__u8 ndisc_nodetype:2,
+deliver_no_wcard:1;
+#else
+__u8 deliver_no_wcard:1;
 #endif
 kmemcheck_bitfield_end(flags2);

@@ -12,7 +12,7 @@ int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
 return NET_RX_DROP;
 
 if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master)))
-goto drop;
+skb->deliver_no_wcard = 1;
 
 skb->skb_iif = skb->dev->ifindex;
 __vlan_hwaccel_put_tag(skb, vlan_tci);
@@ -84,7 +84,7 @@ vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
 struct sk_buff *p;
 
 if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master)))
-goto drop;
+skb->deliver_no_wcard = 1;
 
 skb->skb_iif = skb->dev->ifindex;
 __vlan_hwaccel_put_tag(skb, vlan_tci);

@@ -83,7 +83,7 @@ static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt)
 if (!cfsrvl_ready(service, &ret))
 return ret;
 
-if (!cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
+if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
 pr_err("CAIF: %s():Packet too large - size=%d\n",
 __func__, cfpkt_getlen(pkt));
 return -EOVERFLOW;

@@ -84,7 +84,7 @@ static int cfvei_transmit(struct cflayer *layr, struct cfpkt *pkt)
 return ret;
 caif_assert(layr->dn != NULL);
 caif_assert(layr->dn->transmit != NULL);
-if (!cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
+if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
 pr_warning("CAIF: %s(): Packet too large - size=%d\n",
 __func__, cfpkt_getlen(pkt));
 return -EOVERFLOW;

@@ -2253,11 +2253,9 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
         if (skb_rx_queue_recorded(skb)) {
                 u16 index = skb_get_rx_queue(skb);
                 if (unlikely(index >= dev->num_rx_queues)) {
-                        if (net_ratelimit()) {
-                                pr_warning("%s received packet on queue "
-                                        "%u, but number of RX queues is %u\n",
-                                        dev->name, index, dev->num_rx_queues);
-                        }
+                        WARN_ONCE(dev->num_rx_queues > 1, "%s received packet "
+                                "on queue %u, but number of RX queues is %u\n",
+                                dev->name, index, dev->num_rx_queues);
                         goto done;
                 }
                 rxqueue = dev->_rx + index;
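The get_rps_cpu() hunk swaps a rate-limited printk for WARN_ONCE(), which also stays silent on single-queue devices. A rough userspace analogue of the warn-once behaviour, purely illustrative and not the kernel macro:

#include <stdbool.h>
#include <stdio.h>

/* Crude model of WARN_ONCE(): the condition is evaluated every time, but the
 * message is emitted at most once for the life of the process. */
static void warn_once(bool cond, const char *msg)
{
        static bool warned;

        if (cond && !warned) {
                warned = true;
                fprintf(stderr, "WARNING: %s\n", msg);
        }
}

int main(void)
{
        unsigned num_rx_queues = 4;

        for (int i = 0; i < 3; i++)     /* only the first call prints */
                warn_once(num_rx_queues > 1,
                          "received packet on a queue beyond the configured RX queue count");
        return 0;
}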
@@ -2812,13 +2810,24 @@ static int __netif_receive_skb(struct sk_buff *skb)
         if (!skb->skb_iif)
                 skb->skb_iif = skb->dev->ifindex;
 
+        /*
+         * bonding note: skbs received on inactive slaves should only
+         * be delivered to pkt handlers that are exact matches. Also
+         * the deliver_no_wcard flag will be set. If packet handlers
+         * are sensitive to duplicate packets these skbs will need to
+         * be dropped at the handler. The vlan accel path may have
+         * already set the deliver_no_wcard flag.
+         */
         null_or_orig = NULL;
         orig_dev = skb->dev;
         master = ACCESS_ONCE(orig_dev->master);
-        if (master) {
-                if (skb_bond_should_drop(skb, master))
+        if (skb->deliver_no_wcard)
+                null_or_orig = orig_dev;
+        else if (master) {
+                if (skb_bond_should_drop(skb, master)) {
+                        skb->deliver_no_wcard = 1;
                         null_or_orig = orig_dev; /* deliver only exact match */
-                else
+                } else
                         skb->dev = master;
         }
 
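The rewritten __netif_receive_skb() block consults the new deliver_no_wcard bit before the bonding check, so a decision made earlier (for example in the vlan accel paths above) stays with the skb instead of the packet being dropped outright. A simplified stand-alone model of just that branch ordering; the types and names are illustrative, not the kernel structures:

#include <stdbool.h>
#include <stdio.h>

struct toy_skb {
        unsigned deliver_no_wcard:1;    /* set once, honoured from then on */
        int      dev;                   /* originating device id */
};

/* Returns the device that handlers must match exactly, or -1 when normal
 * (wildcard) delivery is allowed. Mirrors the new branch order: an
 * already-set flag wins, otherwise the bonding decision sets it. */
static int exact_match_dev(struct toy_skb *skb, bool has_master, bool bond_should_drop)
{
        if (skb->deliver_no_wcard)
                return skb->dev;
        if (has_master && bond_should_drop) {
                skb->deliver_no_wcard = 1;
                return skb->dev;
        }
        return -1;
}

int main(void)
{
        struct toy_skb a = { .deliver_no_wcard = 1, .dev = 2 };  /* marked by vlan path */
        struct toy_skb b = { .deliver_no_wcard = 0, .dev = 3 };  /* marked below */
        int ra = exact_match_dev(&a, false, false);
        int rb = exact_match_dev(&b, true, true);

        printf("a -> %d, b -> %d, b flag now %u\n", ra, rb, b.deliver_no_wcard);
        return 0;
}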
@@ -107,6 +107,7 @@ static DEFINE_RWLOCK(est_lock);
 
 /* Protects against soft lockup during large deletion */
 static struct rb_root est_root = RB_ROOT;
+static DEFINE_SPINLOCK(est_tree_lock);
 
 static void est_timer(unsigned long arg)
 {
@@ -201,7 +202,6 @@ struct gen_estimator *gen_find_node(const struct gnet_stats_basic_packed *bstats
  *
  * Returns 0 on success or a negative error code.
  *
- * NOTE: Called under rtnl_mutex
  */
 int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
                       struct gnet_stats_rate_est *rate_est,
@@ -232,6 +232,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
         est->last_packets = bstats->packets;
         est->avpps = rate_est->pps<<10;
 
+        spin_lock(&est_tree_lock);
         if (!elist[idx].timer.function) {
                 INIT_LIST_HEAD(&elist[idx].list);
                 setup_timer(&elist[idx].timer, est_timer, idx);
@@ -242,6 +243,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
 
         list_add_rcu(&est->list, &elist[idx].list);
         gen_add_node(est);
+        spin_unlock(&est_tree_lock);
 
         return 0;
 }
@@ -261,13 +263,13 @@ static void __gen_kill_estimator(struct rcu_head *head)
  *
  * Removes the rate estimator specified by &bstats and &rate_est.
  *
- * NOTE: Called under rtnl_mutex
  */
 void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
                         struct gnet_stats_rate_est *rate_est)
 {
         struct gen_estimator *e;
 
+        spin_lock(&est_tree_lock);
         while ((e = gen_find_node(bstats, rate_est))) {
                 rb_erase(&e->node, &est_root);
 
@@ -278,6 +280,7 @@ void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
                 list_del_rcu(&e->list);
                 call_rcu(&e->e_rcu, __gen_kill_estimator);
         }
+        spin_unlock(&est_tree_lock);
 }
 EXPORT_SYMBOL(gen_kill_estimator);
 
@@ -312,8 +315,14 @@ EXPORT_SYMBOL(gen_replace_estimator);
 bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
                           const struct gnet_stats_rate_est *rate_est)
 {
+        bool res;
+
         ASSERT_RTNL();
 
-        return gen_find_node(bstats, rate_est) != NULL;
+        spin_lock(&est_tree_lock);
+        res = gen_find_node(bstats, rate_est) != NULL;
+        spin_unlock(&est_tree_lock);
+
+        return res;
 }
 EXPORT_SYMBOL(gen_estimator_active);
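The gen_estimator hunks all serve one change: every walk of est_root (add, kill, active-check) now runs under the new est_tree_lock spinlock instead of relying on rtnl_mutex alone, which is why the two "Called under rtnl_mutex" notes were dropped. A userspace sketch of that pattern, using a POSIX mutex and tsearch() as stand-ins for the kernel spinlock and rbtree (compile with -pthread; names are illustrative):

#include <pthread.h>
#include <search.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for est_tree_lock and est_root; the point is only that every
 * traversal of the shared tree is wrapped by the same lock. */
static pthread_mutex_t est_tree_lock = PTHREAD_MUTEX_INITIALIZER;
static void *est_root;

static int cmp(const void *a, const void *b)
{
        long x = (long)a, y = (long)b;

        return (x > y) - (x < y);
}

static void est_add(long key)
{
        pthread_mutex_lock(&est_tree_lock);
        tsearch((void *)key, &est_root, cmp);
        pthread_mutex_unlock(&est_tree_lock);
}

static bool est_active(long key)
{
        bool res;

        pthread_mutex_lock(&est_tree_lock);
        res = tfind((void *)key, &est_root, cmp) != NULL;
        pthread_mutex_unlock(&est_tree_lock);
        return res;
}

int main(void)
{
        est_add(42);
        printf("active(42)=%d active(7)=%d\n", est_active(42), est_active(7));
        return 0;
}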
@@ -2170,7 +2170,7 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
         end_time = ktime_now();
 
         pkt_dev->idle_acc += ktime_to_ns(ktime_sub(end_time, start_time));
-        pkt_dev->next_tx = ktime_add_ns(end_time, pkt_dev->delay);
+        pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay);
 }
 
 static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev)
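The pktgen hunk bases the next transmit time on the requested wake-up time (spin_until) rather than the moment the spin loop actually returned, so per-packet overshoot no longer accumulates into a lower effective rate. A toy calculation with invented numbers, not pktgen code:

#include <stdio.h>

int main(void)
{
        const long delay_ns = 1000;     /* desired inter-packet gap */
        const long overshoot_ns = 50;   /* how late each spin wakes up */
        long next_old = 0, next_new = 0;

        for (int i = 0; i < 5; i++) {
                long end_time = next_old + overshoot_ns;  /* actual wake-up */

                next_old = end_time + delay_ns;   /* old: error accumulates */
                next_new = next_new + delay_ns;   /* new: anchored to schedule */
        }
        printf("after 5 packets: old=%ld ns, new=%ld ns (ideal %ld ns)\n",
               next_old, next_new, 5 * delay_ns);
        return 0;
}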
@@ -483,7 +483,7 @@ route_done:
                               np->tclass, NULL, &fl, (struct rt6_info*)dst,
                               MSG_DONTWAIT, np->dontfrag);
         if (err) {
-                ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
+                ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS);
                 ip6_flush_pending_frames(sk);
                 goto out_put;
         }
@@ -565,7 +565,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
                                 np->dontfrag);
 
         if (err) {
-                ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
+                ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS);
                 ip6_flush_pending_frames(sk);
                 goto out_put;
         }
@@ -14,6 +14,11 @@ __modbuiltin:
 
 include scripts/Kbuild.include
 
+ifneq ($(KBUILD_SRC),)
+# Create output directory if not already present
+_dummy := $(shell [ -d $(obj) ] || mkdir -p $(obj))
+endif
+
 # The filename Kbuild has precedence over Makefile
 kbuild-dir := $(if $(filter /%,$(src)),$(src),$(srctree)/$(src))
 kbuild-file := $(if $(wildcard $(kbuild-dir)/Kbuild),$(kbuild-dir)/Kbuild,$(kbuild-dir)/Makefile)