Merge branch 'perf/urgent' into perf/core, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 290d9bf281
@@ -396,7 +396,7 @@ LINUXINCLUDE := \
KBUILD_CPPFLAGS := -D__KERNEL__

KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
-	-fno-strict-aliasing -fno-common \
+	-fno-strict-aliasing -fno-common -fshort-wchar \
	-Werror-implicit-function-declaration \
	-Wno-format-security \
	-std=gnu89 $(call cc-option,-fno-PIE)

@@ -442,7 +442,7 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
# ===========================================================================
# Rules shared between *config targets and build targets

-# Basic helpers built in scripts/
+# Basic helpers built in scripts/basic/
PHONY += scripts_basic
scripts_basic:
	$(Q)$(MAKE) $(build)=scripts/basic

@@ -505,7 +505,7 @@ ifeq ($(KBUILD_EXTMOD),)
endif
endif
endif
-# install and module_install need also be processed one by one
+# install and modules_install need also be processed one by one
ifneq ($(filter install,$(MAKECMDGOALS)),)
ifneq ($(filter modules_install,$(MAKECMDGOALS)),)
mixed-targets := 1

@@ -964,7 +964,7 @@ export KBUILD_VMLINUX_MAIN := $(core-y) $(libs-y2) $(drivers-y) $(net-y) $(virt-
export KBUILD_VMLINUX_LIBS := $(libs-y1)
export KBUILD_LDS := arch/$(SRCARCH)/kernel/vmlinux.lds
export LDFLAGS_vmlinux
-# used by scripts/pacmage/Makefile
+# used by scripts/package/Makefile
export KBUILD_ALLDIRS := $(sort $(filter-out arch/%,$(vmlinux-alldirs)) arch Documentation include samples scripts tools)

vmlinux-deps := $(KBUILD_LDS) $(KBUILD_VMLINUX_INIT) $(KBUILD_VMLINUX_MAIN) $(KBUILD_VMLINUX_LIBS)

@@ -992,8 +992,8 @@ include/generated/autoksyms.h: FORCE
ARCH_POSTLINK := $(wildcard $(srctree)/arch/$(SRCARCH)/Makefile.postlink)

# Final link of vmlinux with optional arch pass after final link
-cmd_link-vmlinux = \
-	$(CONFIG_SHELL) $< $(LD) $(LDFLAGS) $(LDFLAGS_vmlinux) ; \
+cmd_link-vmlinux = \
+	$(CONFIG_SHELL) $< $(LD) $(LDFLAGS) $(LDFLAGS_vmlinux) ; \
+	$(if $(ARCH_POSTLINK), $(MAKE) -f $(ARCH_POSTLINK) $@, true)

vmlinux: scripts/link-vmlinux.sh vmlinux_prereq $(vmlinux-deps) FORCE

@@ -1184,6 +1184,7 @@ PHONY += kselftest
kselftest:
	$(Q)$(MAKE) -C tools/testing/selftests run_tests

PHONY += kselftest-clean
kselftest-clean:
	$(Q)$(MAKE) -C tools/testing/selftests clean
@@ -266,6 +266,7 @@

&hdmicec {
	status = "okay";
+	needs-hpd;
};

&hsi2c_4 {
@@ -1,7 +1,7 @@
menuconfig ARCH_AT91
	bool "Atmel SoCs"
	depends on ARCH_MULTI_V4T || ARCH_MULTI_V5 || ARCH_MULTI_V7 || ARM_SINGLE_ARMV7M
-	select ARM_CPU_SUSPEND if PM
+	select ARM_CPU_SUSPEND if PM && ARCH_MULTI_V7
	select COMMON_CLK_AT91
	select GPIOLIB
	select PINCTRL
@@ -608,6 +608,9 @@ static void __init at91_pm_init(void (*pm_idle)(void))

void __init at91rm9200_pm_init(void)
{
+	if (!IS_ENABLED(CONFIG_SOC_AT91RM9200))
+		return;
+
	at91_dt_ramc();

	/*

@@ -620,18 +623,27 @@ void __init at91rm9200_pm_init(void)

void __init at91sam9_pm_init(void)
{
+	if (!IS_ENABLED(CONFIG_SOC_AT91SAM9))
+		return;
+
	at91_dt_ramc();
	at91_pm_init(at91sam9_idle);
}

void __init sama5_pm_init(void)
{
+	if (!IS_ENABLED(CONFIG_SOC_SAMA5))
+		return;
+
	at91_dt_ramc();
	at91_pm_init(NULL);
}

void __init sama5d2_pm_init(void)
{
+	if (!IS_ENABLED(CONFIG_SOC_SAMA5D2))
+		return;
+
	at91_pm_backup_init();
	sama5_pm_init();
}
@@ -100,9 +100,13 @@ acpi_evaluate_object_typed(acpi_handle handle,
		free_buffer_on_error = TRUE;
	}

-	status = acpi_get_handle(handle, pathname, &target_handle);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
+	if (pathname) {
+		status = acpi_get_handle(handle, pathname, &target_handle);
+		if (ACPI_FAILURE(status)) {
+			return_ACPI_STATUS(status);
+		}
+	} else {
+		target_handle = handle;
	}

	full_pathname = acpi_ns_get_external_pathname(target_handle);
@@ -1741,7 +1741,7 @@ error:
 * functioning ECDT EC first in order to handle the events.
 * https://bugzilla.kernel.org/show_bug.cgi?id=115021
 */
-int __init acpi_ec_ecdt_start(void)
+static int __init acpi_ec_ecdt_start(void)
{
	acpi_handle handle;

@@ -2003,20 +2003,17 @@ static inline void acpi_ec_query_exit(void)
int __init acpi_ec_init(void)
{
	int result;
	int ecdt_fail, dsdt_fail;

	/* register workqueue for _Qxx evaluations */
	result = acpi_ec_query_init();
	if (result)
		goto err_exit;
	/* Now register the driver for the EC */
	result = acpi_bus_register_driver(&acpi_ec_driver);
	if (result)
		goto err_exit;
	return result;

err_exit:
	if (result)
		acpi_ec_query_exit();
	return result;
	/* Drivers must be started after acpi_ec_query_init() */
	ecdt_fail = acpi_ec_ecdt_start();
	dsdt_fail = acpi_bus_register_driver(&acpi_ec_driver);
	return ecdt_fail && dsdt_fail ? -ENODEV : 0;
}

/* EC driver currently not unloadable */
@@ -185,7 +185,6 @@ typedef int (*acpi_ec_query_func) (void *data);
int acpi_ec_init(void);
int acpi_ec_ecdt_probe(void);
int acpi_ec_dsdt_probe(void);
-int acpi_ec_ecdt_start(void);
void acpi_ec_block_transactions(void);
void acpi_ec_unblock_transactions(void);
int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
@@ -1047,7 +1047,7 @@ static struct fwnode_handle *acpi_graph_get_child_prop_value(
	fwnode_for_each_child_node(fwnode, child) {
		u32 nr;

-		if (!fwnode_property_read_u32(fwnode, prop_name, &nr))
+		if (fwnode_property_read_u32(child, prop_name, &nr))
			continue;

		if (val == nr)
@@ -2084,7 +2084,6 @@ int __init acpi_scan_init(void)

	acpi_gpe_apply_masked_gpes();
	acpi_update_all_gpes();
-	acpi_ec_ecdt_start();

	acpi_scan_initialized = true;
@@ -1015,7 +1015,7 @@ static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
	cq->uobject = &obj->uobject;
	cq->comp_handler = ib_uverbs_comp_handler;
	cq->event_handler = ib_uverbs_cq_event_handler;
-	cq->cq_context = &ev_file->ev_queue;
+	cq->cq_context = ev_file ? &ev_file->ev_queue : NULL;
	atomic_set(&cq->usecnt, 0);

	obj->uobject.object = cq;

@@ -1522,6 +1522,7 @@ static int create_qp(struct ib_uverbs_file *file,
	qp->qp_type = attr.qp_type;
	atomic_set(&qp->usecnt, 0);
	atomic_inc(&pd->usecnt);
+	qp->port = 0;
	if (attr.send_cq)
		atomic_inc(&attr.send_cq->usecnt);
	if (attr.recv_cq)

@@ -1962,8 +1963,9 @@ static int modify_qp(struct ib_uverbs_file *file,
	attr->alt_timeout = cmd->base.alt_timeout;
	attr->rate_limit = cmd->rate_limit;

-	attr->ah_attr.type = rdma_ah_find_type(qp->device,
-					       cmd->base.dest.port_num);
+	if (cmd->base.attr_mask & IB_QP_AV)
+		attr->ah_attr.type = rdma_ah_find_type(qp->device,
+						       cmd->base.dest.port_num);
	if (cmd->base.dest.is_global) {
		rdma_ah_set_grh(&attr->ah_attr, NULL,
				cmd->base.dest.flow_label,

@@ -1981,8 +1983,9 @@ static int modify_qp(struct ib_uverbs_file *file,
		rdma_ah_set_port_num(&attr->ah_attr,
				     cmd->base.dest.port_num);

-	attr->alt_ah_attr.type = rdma_ah_find_type(qp->device,
-						   cmd->base.dest.port_num);
+	if (cmd->base.attr_mask & IB_QP_ALT_PATH)
+		attr->alt_ah_attr.type =
+			rdma_ah_find_type(qp->device, cmd->base.dest.port_num);
	if (cmd->base.alt_dest.is_global) {
		rdma_ah_set_grh(&attr->alt_ah_attr, NULL,
				cmd->base.alt_dest.flow_label,
@@ -838,6 +838,7 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
	spin_lock_init(&qp->mr_lock);
	INIT_LIST_HEAD(&qp->rdma_mrs);
	INIT_LIST_HEAD(&qp->sig_mrs);
+	qp->port = 0;

	if (qp_init_attr->qp_type == IB_QPT_XRC_TGT)
		return ib_create_xrc_qp(qp, qp_init_attr);

@@ -1297,7 +1298,11 @@ int ib_modify_qp_with_udata(struct ib_qp *qp, struct ib_qp_attr *attr,
		if (ret)
			return ret;
	}
-	return ib_security_modify_qp(qp, attr, attr_mask, udata);
+	ret = ib_security_modify_qp(qp, attr, attr_mask, udata);
+	if (!ret && (attr_mask & IB_QP_PORT))
+		qp->port = attr->port_num;
+
+	return ret;
}
EXPORT_SYMBOL(ib_modify_qp_with_udata);
@@ -1085,6 +1085,12 @@ static int mlx5_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
	bool is_ib = (mlx5_ib_port_link_layer(ibdev, port) ==
		      IB_LINK_LAYER_INFINIBAND);

+	/* CM layer calls ib_modify_port() regardless of the link layer. For
+	 * Ethernet ports, qkey violation and Port capabilities are meaningless.
+	 */
+	if (!is_ib)
+		return 0;
+
	if (MLX5_CAP_GEN(dev->mdev, ib_virt) && is_ib) {
		change_mask = props->clr_port_cap_mask | props->set_port_cap_mask;
		value = ~props->clr_port_cap_mask | props->set_port_cap_mask;
@@ -1238,6 +1238,7 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
			goto err_destroy_tis;

		sq->base.container_mibqp = qp;
+		sq->base.mqp.event = mlx5_ib_qp_event;
	}

	if (qp->rq.wqe_cnt) {
@@ -72,7 +72,7 @@ struct atmel_smc_timing_xlate {
	{ .name = nm, .converter = atmel_smc_cs_conf_set_pulse, .shift = pos}

#define ATMEL_SMC_CYCLE_XLATE(nm, pos) \
-	{ .name = nm, .converter = atmel_smc_cs_conf_set_setup, .shift = pos}
+	{ .name = nm, .converter = atmel_smc_cs_conf_set_cycle, .shift = pos}

static void at91sam9_ebi_get_config(struct atmel_ebi_dev *ebid,
				    struct atmel_ebi_dev_config *conf)

@@ -120,12 +120,14 @@ static int atmel_ebi_xslate_smc_timings(struct atmel_ebi_dev *ebid,
	if (!ret) {
		required = true;
		ncycles = DIV_ROUND_UP(val, clk_period_ns);
-		if (ncycles > ATMEL_SMC_MODE_TDF_MAX ||
-		    ncycles < ATMEL_SMC_MODE_TDF_MIN) {
+		if (ncycles > ATMEL_SMC_MODE_TDF_MAX) {
			ret = -EINVAL;
			goto out;
		}

+		if (ncycles < ATMEL_SMC_MODE_TDF_MIN)
+			ncycles = ATMEL_SMC_MODE_TDF_MIN;
+
		smcconf->mode |= ATMEL_SMC_MODE_TDF(ncycles);
	}

@@ -263,7 +265,7 @@ static int atmel_ebi_xslate_smc_config(struct atmel_ebi_dev *ebid,
	}

	ret = atmel_ebi_xslate_smc_timings(ebid, np, &conf->smcconf);
-	if (ret)
+	if (ret < 0)
		return -EINVAL;

	if ((ret > 0 && !required) || (!ret && required)) {
@@ -206,7 +206,7 @@ EXPORT_SYMBOL_GPL(atmel_smc_cs_conf_set_pulse);
 * parameter
 *
 * This function encodes the @ncycles value as described in the datasheet
- * (section "SMC Pulse Register"), and then stores the result in the
+ * (section "SMC Cycle Register"), and then stores the result in the
 * @conf->setup field at @shift position.
 *
 * Returns -EINVAL if @shift is invalid, -ERANGE if @ncycles does not fit in
@@ -745,6 +745,9 @@ void *knav_pool_create(const char *name,
	bool slot_found;
	int ret;

+	if (!kdev)
+		return ERR_PTR(-EPROBE_DEFER);
+
	if (!kdev->dev)
		return ERR_PTR(-ENODEV);
@@ -69,13 +69,8 @@ static void pty_close(struct tty_struct *tty, struct file *filp)
#ifdef CONFIG_UNIX98_PTYS
	if (tty->driver == ptm_driver) {
		mutex_lock(&devpts_mutex);
-		if (tty->link->driver_data) {
-			struct path *path = tty->link->driver_data;
-
-			devpts_pty_kill(path->dentry);
-			path_put(path);
-			kfree(path);
-		}
+		if (tty->link->driver_data)
+			devpts_pty_kill(tty->link->driver_data);
		mutex_unlock(&devpts_mutex);
	}
#endif

@@ -607,25 +602,24 @@ static inline void legacy_pty_init(void) { }
static struct cdev ptmx_cdev;

/**
- * pty_open_peer - open the peer of a pty
- * @tty: the peer of the pty being opened
+ * ptm_open_peer - open the peer of a pty
+ * @master: the open struct file of the ptmx device node
+ * @tty: the master of the pty being opened
 * @flags: the flags for open
 *
- * Open the cached dentry in tty->link, providing a safe way for userspace
- * to get the slave end of a pty (where they have the master fd and cannot
- * access or trust the mount namespace /dev/pts was mounted inside).
+ * Provide a race free way for userspace to open the slave end of a pty
+ * (where they have the master fd and cannot access or trust the mount
+ * namespace /dev/pts was mounted inside).
 */
-static struct file *pty_open_peer(struct tty_struct *tty, int flags)
-{
-	if (tty->driver->subtype != PTY_TYPE_MASTER)
-		return ERR_PTR(-EIO);
-	return dentry_open(tty->link->driver_data, flags, current_cred());
-}
-
-static int pty_get_peer(struct tty_struct *tty, int flags)
+int ptm_open_peer(struct file *master, struct tty_struct *tty, int flags)
{
	int fd = -1;
-	struct file *filp = NULL;
+	struct file *filp;
	int retval = -EINVAL;
+	struct path path;

+	if (tty->driver != ptm_driver)
+		return -EIO;
+
	fd = get_unused_fd_flags(0);
	if (fd < 0) {

@@ -633,7 +627,16 @@ static int pty_get_peer(struct tty_struct *tty, int flags)
		goto err;
	}

-	filp = pty_open_peer(tty, flags);
+	/* Compute the slave's path */
+	path.mnt = devpts_mntget(master, tty->driver_data);
+	if (IS_ERR(path.mnt)) {
+		retval = PTR_ERR(path.mnt);
+		goto err_put;
+	}
+	path.dentry = tty->link->driver_data;
+
+	filp = dentry_open(&path, flags, current_cred());
+	mntput(path.mnt);
	if (IS_ERR(filp)) {
		retval = PTR_ERR(filp);
		goto err_put;

@@ -662,8 +665,6 @@ static int pty_unix98_ioctl(struct tty_struct *tty,
		return pty_get_pktmode(tty, (int __user *)arg);
	case TIOCGPTN: /* Get PT Number */
		return put_user(tty->index, (unsigned int __user *)arg);
-	case TIOCGPTPEER: /* Open the other end */
-		return pty_get_peer(tty, (int) arg);
	case TIOCSIG:    /* Send signal to other side of pty */
		return pty_signal(tty, (int) arg);
	}

@@ -791,7 +792,6 @@ static int ptmx_open(struct inode *inode, struct file *filp)
{
	struct pts_fs_info *fsi;
	struct tty_struct *tty;
-	struct path *pts_path;
	struct dentry *dentry;
	int retval;
	int index;

@@ -845,26 +845,16 @@ static int ptmx_open(struct inode *inode, struct file *filp)
		retval = PTR_ERR(dentry);
		goto err_release;
	}
-	/* We need to cache a fake path for TIOCGPTPEER. */
-	pts_path = kmalloc(sizeof(struct path), GFP_KERNEL);
-	if (!pts_path)
-		goto err_release;
-	pts_path->mnt = filp->f_path.mnt;
-	pts_path->dentry = dentry;
-	path_get(pts_path);
-	tty->link->driver_data = pts_path;
+	tty->link->driver_data = dentry;

	retval = ptm_driver->ops->open(tty, filp);
	if (retval)
-		goto err_path_put;
+		goto err_release;

	tty_debug_hangup(tty, "opening (count=%d)\n", tty->count);

	tty_unlock(tty);
	return 0;
-err_path_put:
-	path_put(pts_path);
-	kfree(pts_path);
err_release:
	tty_unlock(tty);
	// This will also put-ref the fsi
@@ -2518,6 +2518,9 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
	case TIOCSSERIAL:
		tty_warn_deprecated_flags(p);
		break;
+	case TIOCGPTPEER:
+		/* Special because the struct file is needed */
+		return ptm_open_peer(file, tty, (int)arg);
	default:
		retval = tty_jobctrl_ioctl(tty, real_tty, file, cmd, arg);
		if (retval != -ENOIOCTLCMD)
@@ -7,9 +7,6 @@ obj-y += xenbus/
nostackp := $(call cc-option, -fno-stack-protector)
CFLAGS_features.o := $(nostackp)

-CFLAGS_efi.o += -fshort-wchar
-LDFLAGS += $(call ld-option, --no-wchar-size-warning)
-
dom0-$(CONFIG_ARM64) += arm-device.o
dom0-$(CONFIG_PCI) += pci.o
dom0-$(CONFIG_USB_SUPPORT) += dbgp.o
@@ -3516,7 +3516,7 @@ static blk_status_t wait_dev_flush(struct btrfs_device *device)
	struct bio *bio = device->flush_bio;

	if (!device->flush_bio_sent)
-		return 0;
+		return BLK_STS_OK;

	device->flush_bio_sent = 0;
	wait_for_completion_io(&device->flush_wait);

@@ -3563,7 +3563,7 @@ static int barrier_all_devices(struct btrfs_fs_info *info)
			continue;

		write_dev_flush(dev);
-		dev->last_flush_error = 0;
+		dev->last_flush_error = BLK_STS_OK;
	}

	/* wait for all the barriers */
@@ -7924,11 +7924,12 @@ err:
	return ret;
}

-static inline int submit_dio_repair_bio(struct inode *inode, struct bio *bio,
-					int mirror_num)
+static inline blk_status_t submit_dio_repair_bio(struct inode *inode,
+						 struct bio *bio,
+						 int mirror_num)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
-	int ret;
+	blk_status_t ret;

	BUG_ON(bio_op(bio) == REQ_OP_WRITE);

@@ -7980,10 +7981,10 @@ static int btrfs_check_dio_repairable(struct inode *inode,
	return 1;
}

-static int dio_read_error(struct inode *inode, struct bio *failed_bio,
-			  struct page *page, unsigned int pgoff,
-			  u64 start, u64 end, int failed_mirror,
-			  bio_end_io_t *repair_endio, void *repair_arg)
+static blk_status_t dio_read_error(struct inode *inode, struct bio *failed_bio,
+				   struct page *page, unsigned int pgoff,
+				   u64 start, u64 end, int failed_mirror,
+				   bio_end_io_t *repair_endio, void *repair_arg)
{
	struct io_failure_record *failrec;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;

@@ -7993,18 +7994,19 @@ static int dio_read_error(struct inode *inode, struct bio *failed_bio,
	int read_mode = 0;
	int segs;
	int ret;
+	blk_status_t status;

	BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);

	ret = btrfs_get_io_failure_record(inode, start, end, &failrec);
	if (ret)
-		return ret;
+		return errno_to_blk_status(ret);

	ret = btrfs_check_dio_repairable(inode, failed_bio, failrec,
					 failed_mirror);
	if (!ret) {
		free_io_failure(failure_tree, io_tree, failrec);
-		return -EIO;
+		return BLK_STS_IOERR;
	}

	segs = bio_segments(failed_bio);

@@ -8022,13 +8024,13 @@ static int dio_read_error(struct inode *inode, struct bio *failed_bio,
		"Repair DIO Read Error: submitting new dio read[%#x] to this_mirror=%d, in_validation=%d\n",
		read_mode, failrec->this_mirror, failrec->in_validation);

-	ret = submit_dio_repair_bio(inode, bio, failrec->this_mirror);
-	if (ret) {
+	status = submit_dio_repair_bio(inode, bio, failrec->this_mirror);
+	if (status) {
		free_io_failure(failure_tree, io_tree, failrec);
		bio_put(bio);
	}

-	return ret;
+	return status;
}

struct btrfs_retry_complete {

@@ -8065,8 +8067,8 @@ end:
	bio_put(bio);
}

-static int __btrfs_correct_data_nocsum(struct inode *inode,
-				       struct btrfs_io_bio *io_bio)
+static blk_status_t __btrfs_correct_data_nocsum(struct inode *inode,
+						struct btrfs_io_bio *io_bio)
{
	struct btrfs_fs_info *fs_info;
	struct bio_vec bvec;

@@ -8076,8 +8078,8 @@ static int __btrfs_correct_data_nocsum(struct inode *inode,
	unsigned int pgoff;
	u32 sectorsize;
	int nr_sectors;
-	int ret;
-	int err = 0;
+	blk_status_t ret;
+	blk_status_t err = BLK_STS_OK;

	fs_info = BTRFS_I(inode)->root->fs_info;
	sectorsize = fs_info->sectorsize;

@@ -8183,11 +8185,12 @@ static blk_status_t __btrfs_subio_endio_read(struct inode *inode,
	int csum_pos;
	bool uptodate = (err == 0);
	int ret;
+	blk_status_t status;

	fs_info = BTRFS_I(inode)->root->fs_info;
	sectorsize = fs_info->sectorsize;

-	err = 0;
+	err = BLK_STS_OK;
	start = io_bio->logical;
	done.inode = inode;
	io_bio->bio.bi_iter = io_bio->iter;

@@ -8209,12 +8212,12 @@ try_again:
		done.start = start;
		init_completion(&done.done);

-		ret = dio_read_error(inode, &io_bio->bio, bvec.bv_page,
-				     pgoff, start, start + sectorsize - 1,
-				     io_bio->mirror_num,
-				     btrfs_retry_endio, &done);
-		if (ret) {
-			err = errno_to_blk_status(ret);
+		status = dio_read_error(inode, &io_bio->bio, bvec.bv_page,
+					pgoff, start, start + sectorsize - 1,
+					io_bio->mirror_num, btrfs_retry_endio,
+					&done);
+		if (status) {
+			err = status;
			goto next;
		}

@@ -8250,7 +8253,7 @@ static blk_status_t btrfs_subio_endio_read(struct inode *inode,
		if (unlikely(err))
			return __btrfs_correct_data_nocsum(inode, io_bio);
		else
-			return 0;
+			return BLK_STS_OK;
	} else {
		return __btrfs_subio_endio_read(inode, io_bio, err);
	}

@@ -8423,9 +8426,9 @@ static inline blk_status_t btrfs_lookup_and_bind_dio_csum(struct inode *inode,
	return 0;
}

-static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
-					 u64 file_offset, int skip_sum,
-					 int async_submit)
+static inline blk_status_t
+__btrfs_submit_dio_bio(struct bio *bio, struct inode *inode, u64 file_offset,
+		       int skip_sum, int async_submit)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_dio_private *dip = bio->bi_private;

@@ -8488,6 +8491,7 @@ static int btrfs_submit_direct_hook(struct btrfs_dio_private *dip,
	int clone_offset = 0;
	int clone_len;
	int ret;
+	blk_status_t status;

	map_length = orig_bio->bi_iter.bi_size;
	submit_len = map_length;

@@ -8537,9 +8541,9 @@ static int btrfs_submit_direct_hook(struct btrfs_dio_private *dip,
		 */
		atomic_inc(&dip->pending_bios);

-		ret = __btrfs_submit_dio_bio(bio, inode, file_offset, skip_sum,
-					     async_submit);
-		if (ret) {
+		status = __btrfs_submit_dio_bio(bio, inode, file_offset, skip_sum,
+						async_submit);
+		if (status) {
			bio_put(bio);
			atomic_dec(&dip->pending_bios);
			goto out_err;

@@ -8557,9 +8561,9 @@ static int btrfs_submit_direct_hook(struct btrfs_dio_private *dip,
	} while (submit_len > 0);

submit:
-	ret = __btrfs_submit_dio_bio(bio, inode, file_offset, skip_sum,
-				     async_submit);
-	if (!ret)
+	status = __btrfs_submit_dio_bio(bio, inode, file_offset, skip_sum,
+					async_submit);
+	if (!status)
		return 0;

	bio_put(bio);
@@ -905,7 +905,7 @@ static void raid_write_end_io(struct bio *bio)
	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

-	err = 0;
+	err = BLK_STS_OK;

	/* OK, we have read all the stripes we need to. */
	max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ?

@@ -1324,7 +1324,7 @@ write_data:
	return;

cleanup:
-	rbio_orig_end_io(rbio, -EIO);
+	rbio_orig_end_io(rbio, BLK_STS_IOERR);
}

/*

@@ -1475,7 +1475,7 @@ static void raid_rmw_end_io(struct bio *bio)

cleanup:

-	rbio_orig_end_io(rbio, -EIO);
+	rbio_orig_end_io(rbio, BLK_STS_IOERR);
}

static void async_rmw_stripe(struct btrfs_raid_bio *rbio)

@@ -1579,7 +1579,7 @@ static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
	return 0;

cleanup:
-	rbio_orig_end_io(rbio, -EIO);
+	rbio_orig_end_io(rbio, BLK_STS_IOERR);
	return -EIO;

finish:

@@ -1795,12 +1795,12 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
	void **pointers;
	int faila = -1, failb = -1;
	struct page *page;
-	int err;
+	blk_status_t err;
	int i;

	pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
	if (!pointers) {
-		err = -ENOMEM;
+		err = BLK_STS_RESOURCE;
		goto cleanup_io;
	}

@@ -1856,7 +1856,7 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
			 * a bad data or Q stripe.
			 * TODO, we should redo the xor here.
			 */
-			err = -EIO;
+			err = BLK_STS_IOERR;
			goto cleanup;
		}
		/*

@@ -1882,7 +1882,7 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
			if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) {
				if (rbio->bbio->raid_map[faila] ==
				    RAID5_P_STRIPE) {
-					err = -EIO;
+					err = BLK_STS_IOERR;
					goto cleanup;
				}
				/*

@@ -1954,13 +1954,13 @@ pstripe:
		}
	}

-	err = 0;
+	err = BLK_STS_OK;
cleanup:
	kfree(pointers);

cleanup_io:
	if (rbio->operation == BTRFS_RBIO_READ_REBUILD) {
-		if (err == 0)
+		if (err == BLK_STS_OK)
			cache_rbio_pages(rbio);
		else
			clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

@@ -1968,7 +1968,7 @@ cleanup_io:
		rbio_orig_end_io(rbio, err);
	} else if (rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
		rbio_orig_end_io(rbio, err);
-	} else if (err == 0) {
+	} else if (err == BLK_STS_OK) {
		rbio->faila = -1;
		rbio->failb = -1;

@@ -2005,7 +2005,7 @@ static void raid_recover_end_io(struct bio *bio)
		return;

	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
-		rbio_orig_end_io(rbio, -EIO);
+		rbio_orig_end_io(rbio, BLK_STS_IOERR);
	else
		__raid_recover_end_io(rbio);
}

@@ -2104,7 +2104,7 @@ out:
cleanup:
	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING)
-		rbio_orig_end_io(rbio, -EIO);
+		rbio_orig_end_io(rbio, BLK_STS_IOERR);
	return -EIO;
}

@@ -2431,7 +2431,7 @@ submit_write:
	nr_data = bio_list_size(&bio_list);
	if (!nr_data) {
		/* Every parity is right */
-		rbio_orig_end_io(rbio, 0);
+		rbio_orig_end_io(rbio, BLK_STS_OK);
		return;
	}

@@ -2451,7 +2451,7 @@ submit_write:
	return;

cleanup:
-	rbio_orig_end_io(rbio, -EIO);
+	rbio_orig_end_io(rbio, BLK_STS_IOERR);
}

static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)

@@ -2519,7 +2519,7 @@ static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
	return;

cleanup:
-	rbio_orig_end_io(rbio, -EIO);
+	rbio_orig_end_io(rbio, BLK_STS_IOERR);
}

/*

@@ -2633,7 +2633,7 @@ static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
	return;

cleanup:
-	rbio_orig_end_io(rbio, -EIO);
+	rbio_orig_end_io(rbio, BLK_STS_IOERR);
	return;

finish:
@@ -6212,8 +6212,8 @@ static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
	}
}

-int btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
-		  int mirror_num, int async_submit)
+blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
+			   int mirror_num, int async_submit)
{
	struct btrfs_device *dev;
	struct bio *first_bio = bio;

@@ -6233,7 +6233,7 @@ int btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
			      &map_length, &bbio, mirror_num, 1);
	if (ret) {
		btrfs_bio_counter_dec(fs_info);
-		return ret;
+		return errno_to_blk_status(ret);
	}

	total_devs = bbio->num_stripes;

@@ -6256,7 +6256,7 @@ int btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
		}

		btrfs_bio_counter_dec(fs_info);
-		return ret;
+		return errno_to_blk_status(ret);
	}

	if (map_length < length) {

@@ -6283,7 +6283,7 @@ int btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
				  dev_nr, async_submit);
	}
	btrfs_bio_counter_dec(fs_info);
-	return 0;
+	return BLK_STS_OK;
}

struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid,
@@ -74,7 +74,7 @@ struct btrfs_device {
	int missing;
	int can_discard;
	int is_tgtdev_for_dev_replace;
-	int last_flush_error;
+	blk_status_t last_flush_error;
	int flush_bio_sent;

#ifdef __BTRFS_NEED_DEVICE_DATA_ORDERED

@@ -416,8 +416,8 @@ int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
		      struct btrfs_fs_info *fs_info, u64 type);
void btrfs_mapping_init(struct btrfs_mapping_tree *tree);
void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree);
-int btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
-		  int mirror_num, int async_submit);
+blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
+			   int mirror_num, int async_submit);
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder);
int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
@@ -133,6 +133,50 @@ static inline struct pts_fs_info *DEVPTS_SB(struct super_block *sb)
	return sb->s_fs_info;
}

+static int devpts_ptmx_path(struct path *path)
+{
+	struct super_block *sb;
+	int err;
+
+	/* Has the devpts filesystem already been found? */
+	if (path->mnt->mnt_sb->s_magic == DEVPTS_SUPER_MAGIC)
+		return 0;
+
+	/* Is a devpts filesystem at "pts" in the same directory? */
+	err = path_pts(path);
+	if (err)
+		return err;
+
+	/* Is the path the root of a devpts filesystem? */
+	sb = path->mnt->mnt_sb;
+	if ((sb->s_magic != DEVPTS_SUPER_MAGIC) ||
+	    (path->mnt->mnt_root != sb->s_root))
+		return -ENODEV;
+
+	return 0;
+}
+
+struct vfsmount *devpts_mntget(struct file *filp, struct pts_fs_info *fsi)
+{
+	struct path path;
+	int err;
+
+	path = filp->f_path;
+	path_get(&path);
+
+	err = devpts_ptmx_path(&path);
+	dput(path.dentry);
+	if (err) {
+		mntput(path.mnt);
+		path.mnt = ERR_PTR(err);
+	}
+	if (DEVPTS_SB(path.mnt->mnt_sb) != fsi) {
+		mntput(path.mnt);
+		path.mnt = ERR_PTR(-ENODEV);
+	}
+	return path.mnt;
+}
+
struct pts_fs_info *devpts_acquire(struct file *filp)
{
	struct pts_fs_info *result;

@@ -143,27 +187,16 @@ struct pts_fs_info *devpts_acquire(struct file *filp)
	path = filp->f_path;
	path_get(&path);

-	/* Has the devpts filesystem already been found? */
-	sb = path.mnt->mnt_sb;
-	if (sb->s_magic != DEVPTS_SUPER_MAGIC) {
-		/* Is a devpts filesystem at "pts" in the same directory? */
-		err = path_pts(&path);
-		if (err) {
-			result = ERR_PTR(err);
-			goto out;
-		}
-
-		/* Is the path the root of a devpts filesystem? */
-		result = ERR_PTR(-ENODEV);
-		sb = path.mnt->mnt_sb;
-		if ((sb->s_magic != DEVPTS_SUPER_MAGIC) ||
-		    (path.mnt->mnt_root != sb->s_root))
-			goto out;
+	err = devpts_ptmx_path(&path);
+	if (err) {
+		result = ERR_PTR(err);
+		goto out;
	}

	/*
	 * pty code needs to hold extra references in case of last /dev/tty close
	 */
+	sb = path.mnt->mnt_sb;
	atomic_inc(&sb->s_active);
	result = DEVPTS_SB(sb);
@@ -59,6 +59,22 @@
/* Align . to a 8 byte boundary equals to maximum function alignment. */
#define ALIGN_FUNCTION()  . = ALIGN(8)

+/*
+ * LD_DEAD_CODE_DATA_ELIMINATION option enables -fdata-sections, which
+ * generates .data.identifier sections, which need to be pulled in with
+ * .data. We don't want to pull in .data..other sections, which Linux
+ * has defined. Same for text and bss.
+ */
+#ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
+#define TEXT_MAIN .text .text.[0-9a-zA-Z_]*
+#define DATA_MAIN .data .data.[0-9a-zA-Z_]*
+#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]*
+#else
+#define TEXT_MAIN .text
+#define DATA_MAIN .data
+#define BSS_MAIN .bss
+#endif
+
/*
 * Align to a 32 byte boundary equal to the
 * alignment gcc 4.5 uses for a struct

@@ -198,12 +214,9 @@

/*
 * .data section
- * LD_DEAD_CODE_DATA_ELIMINATION option enables -fdata-sections generates
- * .data.identifier which needs to be pulled in with .data, but don't want to
- * pull in .data..stuff which has its own requirements. Same for bss.
 */
#define DATA_DATA \
-	*(.data .data.[0-9a-zA-Z_]*) \
+	*(DATA_MAIN) \
	*(.ref.data) \
	*(.data..shared_aligned) /* percpu related */ \
	MEM_KEEP(init.data) \

@@ -434,16 +447,17 @@
		VMLINUX_SYMBOL(__security_initcall_end) = .; \
	}

-/* .text section. Map to function alignment to avoid address changes
+/*
+ * .text section. Map to function alignment to avoid address changes
 * during second ld run in second ld pass when generating System.map
- * LD_DEAD_CODE_DATA_ELIMINATION option enables -ffunction-sections generates
- * .text.identifier which needs to be pulled in with .text , but some
- * architectures define .text.foo which is not intended to be pulled in here.
- * Those enabling LD_DEAD_CODE_DATA_ELIMINATION must ensure they don't have
- * conflicting section names, and must pull in .text.[0-9a-zA-Z_]* */
+ *
+ * TEXT_MAIN here will match .text.fixup and .text.unlikely if dead
+ * code elimination is enabled, so these sections should be converted
+ * to use ".." first.
+ */
#define TEXT_TEXT \
		ALIGN_FUNCTION(); \
-		*(.text.hot .text .text.fixup .text.unlikely) \
+		*(.text.hot TEXT_MAIN .text.fixup .text.unlikely) \
		*(.ref.text) \
	MEM_KEEP(init.text) \
	MEM_KEEP(exit.text) \

@@ -605,7 +619,7 @@
		BSS_FIRST_SECTIONS \
		*(.bss..page_aligned) \
		*(.dynbss) \
-		*(.bss .bss.[0-9a-zA-Z_]*) \
+		*(BSS_MAIN) \
		*(COMMON) \
	}
@@ -19,6 +19,7 @@

struct pts_fs_info;

+struct vfsmount *devpts_mntget(struct file *, struct pts_fs_info *);
struct pts_fs_info *devpts_acquire(struct file *);
void devpts_release(struct pts_fs_info *);

@@ -32,6 +33,15 @@ void *devpts_get_priv(struct dentry *);
/* unlink */
void devpts_pty_kill(struct dentry *);

+/* in pty.c */
+int ptm_open_peer(struct file *master, struct tty_struct *tty, int flags);
+
#else
+static inline int
+ptm_open_peer(struct file *master, struct tty_struct *tty, int flags)
+{
+	return -EIO;
+}
#endif
@@ -1683,6 +1683,7 @@ struct ib_qp {
	enum ib_qp_type		qp_type;
	struct ib_rwq_ind_table *rwq_ind_tbl;
	struct ib_qp_security  *qp_sec;
+	u8			port;
};

struct ib_mr {
@@ -10041,28 +10041,27 @@ SYSCALL_DEFINE5(perf_event_open,
			goto err_context;

	/*
-	 * Do not allow to attach to a group in a different
-	 * task or CPU context:
+	 * Make sure we're both events for the same CPU;
+	 * grouping events for different CPUs is broken; since
+	 * you can never concurrently schedule them anyhow.
	 */
-	if (move_group) {
-		/*
-		 * Make sure we're both on the same task, or both
-		 * per-cpu events.
-		 */
-		if (group_leader->ctx->task != ctx->task)
-			goto err_context;
+	if (group_leader->cpu != event->cpu)
+		goto err_context;

-		/*
-		 * Make sure we're both events for the same CPU;
-		 * grouping events for different CPUs is broken; since
-		 * you can never concurrently schedule them anyhow.
-		 */
-		if (group_leader->cpu != event->cpu)
-			goto err_context;
-	} else {
-		if (group_leader->ctx != ctx)
-			goto err_context;
-	}
+	/*
+	 * Make sure we're both on the same task, or both
+	 * per-CPU events.
+	 */
+	if (group_leader->ctx->task != ctx->task)
+		goto err_context;
+
+	/*
+	 * Do not allow to attach to a group in a different task
+	 * or CPU context. If we're moving SW events, we'll fix
+	 * this up later, so allow that.
+	 */
+	if (!move_group && group_leader->ctx != ctx)
+		goto err_context;

	/*
	 * Only a group leader can be exclusive or pinned
@@ -889,6 +889,10 @@ static int profile_graph_entry(struct ftrace_graph_ent *trace)

	function_profile_call(trace->func, 0, NULL, NULL);

+	/* If function graph is shutting down, ret_stack can be NULL */
+	if (!current->ret_stack)
+		return 0;
+
	if (index >= 0 && index < FTRACE_RETFUNC_DEPTH)
		current->ret_stack[index].subtime = 0;
@@ -4386,15 +4386,19 @@ EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
 * the page that was allocated, with the read page of the buffer.
 *
 * Returns:
- *  The page allocated, or NULL on error.
+ *  The page allocated, or ERR_PTR
 */
void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
{
-	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
+	struct ring_buffer_per_cpu *cpu_buffer;
	struct buffer_data_page *bpage = NULL;
	unsigned long flags;
	struct page *page;

+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
+		return ERR_PTR(-ENODEV);
+
+	cpu_buffer = buffer->buffers[cpu];
	local_irq_save(flags);
	arch_spin_lock(&cpu_buffer->lock);

@@ -4412,7 +4416,7 @@ void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
	page = alloc_pages_node(cpu_to_node(cpu),
				GFP_KERNEL | __GFP_NORETRY, 0);
	if (!page)
-		return NULL;
+		return ERR_PTR(-ENOMEM);

	bpage = page_address(page);

@@ -4467,8 +4471,8 @@ EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
 *
 * for example:
 *	rpage = ring_buffer_alloc_read_page(buffer, cpu);
- *	if (!rpage)
- *		return error;
+ *	if (IS_ERR(rpage))
+ *		return PTR_ERR(rpage);
 *	ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
 *	if (ret >= 0)
 *		process_page(rpage, ret);

@@ -113,7 +113,7 @@ static enum event_status read_page(int cpu)
	int i;

	bpage = ring_buffer_alloc_read_page(buffer, cpu);
-	if (!bpage)
+	if (IS_ERR(bpage))
		return EVENT_DROPPED;

	ret = ring_buffer_read_page(buffer, &bpage, PAGE_SIZE, cpu, 1);
@@ -6598,7 +6598,7 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;
-	ssize_t ret;
+	ssize_t ret = 0;
	ssize_t size;

	if (!count)

@@ -6612,10 +6612,15 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
	if (!info->spare) {
		info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
							  iter->cpu_file);
-		info->spare_cpu = iter->cpu_file;
+		if (IS_ERR(info->spare)) {
+			ret = PTR_ERR(info->spare);
+			info->spare = NULL;
+		} else {
+			info->spare_cpu = iter->cpu_file;
+		}
	}
	if (!info->spare)
-		return -ENOMEM;
+		return ret;

	/* Do we have previous read data to read? */
	if (info->read < PAGE_SIZE)

@@ -6790,8 +6795,9 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
		ref->ref = 1;
		ref->buffer = iter->trace_buffer->buffer;
		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
-		if (!ref->page) {
-			ret = -ENOMEM;
+		if (IS_ERR(ref->page)) {
+			ret = PTR_ERR(ref->page);
+			ref->page = NULL;
			kfree(ref);
			break;
		}

@@ -8293,6 +8299,7 @@ __init static int tracer_alloc_buffers(void)
	if (ret < 0)
		goto out_free_cpumask;
	/* Used for event triggers */
+	ret = -ENOMEM;
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_rm_hp_state;

@@ -8407,4 +8414,4 @@ __init static int clear_boot_tracer(void)
}

fs_initcall(tracer_init_tracefs);
-late_initcall(clear_boot_tracer);
+late_initcall_sync(clear_boot_tracer);
@@ -1959,6 +1959,10 @@ static int create_filter(struct trace_event_call *call,
		if (err && set_str)
			append_filter_err(ps, filter);
	}
+	if (err && !set_str) {
+		free_event_filter(filter);
+		filter = NULL;
+	}
	create_filter_finish(ps);

	*filterp = filter;
@@ -221,16 +221,19 @@ void tracing_map_array_free(struct tracing_map_array *a)
	if (!a)
		return;

-	if (!a->pages) {
-		kfree(a);
-		return;
-	}
+	if (!a->pages)
+		goto free;

	for (i = 0; i < a->n_pages; i++) {
		if (!a->pages[i])
			break;
		free_page((unsigned long)a->pages[i]);
	}

	kfree(a->pages);

+ free:
	kfree(a);
}

struct tracing_map_array *tracing_map_array_alloc(unsigned int n_elts,
@@ -85,8 +85,8 @@ TMPOUT := $(if $(KBUILD_EXTMOD),$(firstword $(KBUILD_EXTMOD))/)

# try-run
# Usage: option = $(call try-run, $(CC)...-o "$$TMP",option-ok,otherwise)
-# Exit code chooses option. "$$TMP" is can be used as temporary file and
-# is automatically cleaned up.
+# Exit code chooses option. "$$TMP" serves as a temporary file and is
+# automatically cleaned up.
try-run = $(shell set -e; \
	TMP="$(TMPOUT).$$$$.tmp"; \
	TMPO="$(TMPOUT).$$$$.o"; \

@@ -261,7 +261,6 @@ make-cmd = $(call escsq,$(subst \#,\\\#,$(subst $$,$$$$,$(cmd_$(1)))))
any-prereq = $(filter-out $(PHONY),$?) $(filter-out $(PHONY) $(wildcard $^),$^)

# Execute command if command has changed or prerequisite(s) are updated.
-#
if_changed = $(if $(strip $(any-prereq) $(arg-check)), \
	@set -e; \
	$(echo-cmd) $(cmd_$(1)); \

@@ -315,7 +314,7 @@ if_changed_rule = $(if $(strip $(any-prereq) $(arg-check) ), \
	$(rule_$(1)), @:)

###
-# why - tell why a a target got build
+# why - tell why a target got built
# enabled by make V=2
# Output (listed in the order they are checked):
# (1) - due to target is PHONY
@@ -1,9 +1,9 @@
# include/asm-generic contains a lot of files that are used
# verbatim by several architectures.
#
-# This Makefile reads the file arch/$(SRCARCH)/include/asm/Kbuild
+# This Makefile reads the file arch/$(SRCARCH)/include/$(src)/Kbuild
# and for each file listed in this file with generic-y creates
-# a small wrapper file in $(obj) (arch/$(SRCARCH)/include/generated/asm)
+# a small wrapper file in $(obj) (arch/$(SRCARCH)/include/generated/$(src))

kbuild-file := $(srctree)/arch/$(SRCARCH)/include/$(src)/Kbuild
-include $(kbuild-file)
@@ -229,8 +229,8 @@ ifeq ("$(origin RECORDMCOUNT_WARN)", "command line")
endif
# Due to recursion, we must skip empty.o.
# The empty.o file is created in the make process in order to determine
-# the target endianness and word size. It is made before all other C
-# files, including recordmcount.
+# the target endianness and word size. It is made before all other C
+# files, including recordmcount.
sub_cmd_record_mcount =					\
	if [ $(@) != "scripts/mod/empty.o" ]; then	\
		$(objtree)/scripts/recordmcount $(RECORDMCOUNT_FLAGS) "$(@)";	\

@@ -245,13 +245,13 @@ sub_cmd_record_mcount = set -e ; perl $(srctree)/scripts/recordmcount.pl "$(ARCH
	"$(LD)" "$(NM)" "$(RM)" "$(MV)" \
	"$(if $(part-of-module),1,0)" "$(@)";
recordmcount_source := $(srctree)/scripts/recordmcount.pl
-endif
+endif # BUILD_C_RECORDMCOUNT
cmd_record_mcount =						\
	if [ "$(findstring $(CC_FLAGS_FTRACE),$(_c_flags))" =	\
	     "$(CC_FLAGS_FTRACE)" ]; then			\
		$(sub_cmd_record_mcount)			\
	fi;
-endif
+endif # CONFIG_FTRACE_MCOUNT_RECORD

ifdef CONFIG_STACK_VALIDATION
ifneq ($(SKIP_STACK_VALIDATION),1)
@@ -14,7 +14,7 @@ src := $(obj)
PHONY := __dtbs_install
__dtbs_install:

-export dtbinst-root ?= $(obj)
+export dtbinst_root ?= $(obj)

include include/config/auto.conf
include scripts/Kbuild.include

@@ -27,7 +27,7 @@ dtbinst-dirs := $(dts-dirs)
quiet_cmd_dtb_install = INSTALL $<
      cmd_dtb_install = mkdir -p $(2); cp $< $(2)

-install-dir = $(patsubst $(dtbinst-root)%,$(INSTALL_DTBS_PATH)%,$(obj))
+install-dir = $(patsubst $(dtbinst_root)%,$(INSTALL_DTBS_PATH)%,$(obj))

$(dtbinst-files): %.dtb: $(obj)/%.dtb
	$(call cmd,dtb_install,$(install-dir))
@@ -1,5 +1,5 @@
###
-# Makefile.basic lists the most basic programs used during the build process.
+# This Makefile lists the most basic programs used during the build process.
# The programs listed herein are what are needed to do the basic stuff,
# such as fix file dependencies.
# This initial step is needed to avoid files to be recompiled
@@ -25,7 +25,7 @@
 *
 * So we play the same trick that "mkdep" played before. We replace
 * the dependency on autoconf.h by a dependency on every config
- * option which is mentioned in any of the listed prequisites.
+ * option which is mentioned in any of the listed prerequisites.
 *
 * kconfig populates a tree in include/config/ with an empty file
 * for each config symbol and when the configuration is updated

@@ -34,7 +34,7 @@
 * the config symbols are rebuilt.
 *
 * So if the user changes his CONFIG_HIS_DRIVER option, only the objects
- * which depend on "include/linux/config/his/driver.h" will be rebuilt,
+ * which depend on "include/config/his/driver.h" will be rebuilt,
 * so most likely only his driver ;-)
 *
 * The idea above dates, by the way, back to Michael E Chastain, AFAIK.

@@ -75,7 +75,7 @@
 * and then basically copies the .<target>.d file to stdout, in the
 * process filtering out the dependency on autoconf.h and adding
 * dependencies on include/config/my/option.h for every
- * CONFIG_MY_OPTION encountered in any of the prequisites.
+ * CONFIG_MY_OPTION encountered in any of the prerequisites.
 *
 * It will also filter out all the dependencies on *.ver. We need
 * to make sure that the generated version checksum are globally up