Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Pull networking fixes from David Miller:
 "This is mostly to fix the iwlwifi regression:

  1) Flush GRO state properly in iwlwifi driver, from Alexander Lobakin.

  2) Validate TIPC link name with properly length macro, from John Rutherford.

  3) Fix completion init and device query timeouts in ibmvnic, from Thomas Falcon.

  4) Fix SKB size calculation for netlink messages in psample, from Nikolay Aleksandrov.

  5) Similar kind of fix for OVS flow dumps, from Paolo Abeni.

  6) Handle queue allocation failure unwind properly in gve driver, we could try to release pages we didn't allocate. From Jeroen de Borst.

  7) Serialize TX queue SKB list accesses properly in mscc ocelot driver. From Yangbo Lu"

* git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net:
  net: usb: aqc111: Use the correct style for SPDX License Identifier
  net: phy: Use the correct style for SPDX License Identifier
  net: wireless: intel: iwlwifi: fix GRO_NORMAL packet stalling
  net: mscc: ocelot: use skb queue instead of skbs list
  net: mscc: ocelot: avoid incorrect consuming in skbs list
  gve: Fix the queue page list allocated pages count
  net: inet_is_local_reserved_port() port arg should be unsigned short
  openvswitch: fix flow command message size
  net: phy: dp83869: Fix return paths to return proper values
  net: psample: fix skb_over_panic
  net: usbnet: Fix -Wcast-function-type
  net: hso: Fix -Wcast-function-type
  net: port < inet_prot_sock(net) --> inet_port_requires_bind_service(net, port)
  ibmvnic: Serialize device queries
  ibmvnic: Bound waits for device queries
  ibmvnic: Terminate waiting device threads after loss of service
  ibmvnic: Fix completion structure initialization
  net-sctp: replace some sock_net(sk) with just 'net'
  net: Fix a documentation bug wrt. ip_unprivileged_port_start
  tipc: fix link name length check
commit 8c39f71ee2
@@ -904,8 +904,9 @@ ip_local_port_range - 2 INTEGERS
 	Defines the local port range that is used by TCP and UDP to
 	choose the local port. The first number is the first, the
 	second the last local port number.
-	If possible, it is better these numbers have different parity.
-	(one even and one odd values)
+	If possible, it is better these numbers have different parity
+	(one even and one odd value).
+	Must be greater than or equal to ip_unprivileged_port_start.
 	The default values are 32768 and 60999 respectively.
 
 ip_local_reserved_ports - list of comma separated ranges
 
@@ -943,8 +944,8 @@ ip_unprivileged_port_start - INTEGER
 	This is a per-namespace sysctl. It defines the first
 	unprivileged port in the network namespace. Privileged ports
 	require root or CAP_NET_BIND_SERVICE in order to bind to them.
-	To disable all privileged ports, set this to 0. It may not
-	overlap with the ip_local_reserved_ports range.
+	To disable all privileged ports, set this to 0. They must not
+	overlap with the ip_local_port_range.
 
 	Default: 1024
 
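As an illustration (not part of the diff above), the two sysctls documented here can be inspected with a small user-space C program; the /proc/sys paths follow from the documentation, everything else is just an example for an IPv4-enabled system.

#include <stdio.h>

static void print_sysctl(const char *path)
{
	char buf[64];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("%-55s %s", path, buf);
	fclose(f);
}

int main(void)
{
	/* First/last ephemeral port; must not dip below the value below. */
	print_sysctl("/proc/sys/net/ipv4/ip_local_port_range");
	/* Ports below this need CAP_NET_BIND_SERVICE to bind. */
	print_sysctl("/proc/sys/net/ipv4/ip_unprivileged_port_start");
	return 0;
}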
@@ -544,7 +544,7 @@ static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id,
 	}
 
 	qpl->id = id;
-	qpl->num_entries = pages;
+	qpl->num_entries = 0;
 	qpl->pages = kvzalloc(pages * sizeof(*qpl->pages), GFP_KERNEL);
 	/* caller handles clean up */
 	if (!qpl->pages)
@@ -562,6 +562,7 @@ static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id,
 		/* caller handles clean up */
 		if (err)
 			return -ENOMEM;
+		qpl->num_entries++;
 	}
 	priv->num_registered_pages += pages;
 
@@ -159,6 +159,40 @@ static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
 	return rc;
 }
 
+/**
+ * ibmvnic_wait_for_completion - Check device state and wait for completion
+ * @adapter: private device data
+ * @comp_done: completion structure to wait for
+ * @timeout: time to wait in milliseconds
+ *
+ * Wait for a completion signal or until the timeout limit is reached
+ * while checking that the device is still active.
+ */
+static int ibmvnic_wait_for_completion(struct ibmvnic_adapter *adapter,
+				       struct completion *comp_done,
+				       unsigned long timeout)
+{
+	struct net_device *netdev;
+	unsigned long div_timeout;
+	u8 retry;
+
+	netdev = adapter->netdev;
+	retry = 5;
+	div_timeout = msecs_to_jiffies(timeout / retry);
+	while (true) {
+		if (!adapter->crq.active) {
+			netdev_err(netdev, "Device down!\n");
+			return -ENODEV;
+		}
+		if (!retry--)
+			break;
+		if (wait_for_completion_timeout(comp_done, div_timeout))
+			return 0;
+	}
+	netdev_err(netdev, "Operation timed out.\n");
+	return -ETIMEDOUT;
+}
+
 static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
 				struct ibmvnic_long_term_buff *ltb, int size)
 {
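The ibmvnic hunks that follow all wrap their firmware commands the same way, using the new fw_lock mutex plus the bounded helper introduced above. Condensed into one illustrative sketch (not a literal excerpt from the driver), the per-command pattern is roughly:

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	rc = ibmvnic_send_crq(adapter, &crq);	/* or send_request_map() */
	if (rc) {
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	/* bounded wait instead of the old wait_for_completion() */
	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	mutex_unlock(&adapter->fw_lock);
	return adapter->fw_done_rc ? -EIO : 0;

The mutex serializes commands that share the single fw_done completion, and the bounded wait replaces the old unconditional wait_for_completion() so a dead device can no longer hang the caller.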
@@ -176,21 +210,35 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
 	ltb->map_id = adapter->map_id;
 	adapter->map_id++;
 
-	init_completion(&adapter->fw_done);
+	mutex_lock(&adapter->fw_lock);
+	adapter->fw_done_rc = 0;
+	reinit_completion(&adapter->fw_done);
 	rc = send_request_map(adapter, ltb->addr,
 			      ltb->size, ltb->map_id);
 	if (rc) {
 		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
+		mutex_unlock(&adapter->fw_lock);
 		return rc;
 	}
-	wait_for_completion(&adapter->fw_done);
+
+	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
+	if (rc) {
+		dev_err(dev,
+			"Long term map request aborted or timed out,rc = %d\n",
+			rc);
+		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
+		mutex_unlock(&adapter->fw_lock);
+		return rc;
+	}
 
 	if (adapter->fw_done_rc) {
 		dev_err(dev, "Couldn't map long term buffer,rc = %d\n",
 			adapter->fw_done_rc);
 		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
+		mutex_unlock(&adapter->fw_lock);
 		return -1;
 	}
+	mutex_unlock(&adapter->fw_lock);
 	return 0;
 }
 
@@ -211,22 +259,37 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter,
 static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
 				struct ibmvnic_long_term_buff *ltb)
 {
+	struct device *dev = &adapter->vdev->dev;
 	int rc;
 
 	memset(ltb->buff, 0, ltb->size);
 
-	init_completion(&adapter->fw_done);
+	mutex_lock(&adapter->fw_lock);
+	adapter->fw_done_rc = 0;
+
+	reinit_completion(&adapter->fw_done);
 	rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
-	if (rc)
+	if (rc) {
+		mutex_unlock(&adapter->fw_lock);
 		return rc;
-	wait_for_completion(&adapter->fw_done);
+	}
+
+	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
+	if (rc) {
+		dev_info(dev,
+			 "Reset failed, long term map request timed out or aborted\n");
+		mutex_unlock(&adapter->fw_lock);
+		return rc;
+	}
 
 	if (adapter->fw_done_rc) {
-		dev_info(&adapter->vdev->dev,
+		dev_info(dev,
 			 "Reset failed, attempting to free and reallocate buffer\n");
 		free_long_term_buff(adapter, ltb);
+		mutex_unlock(&adapter->fw_lock);
 		return alloc_long_term_buff(adapter, ltb, ltb->size);
 	}
+	mutex_unlock(&adapter->fw_lock);
 	return 0;
 }
 
@@ -943,13 +1006,25 @@ static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
 	if (adapter->vpd->buff)
 		len = adapter->vpd->len;
 
-	init_completion(&adapter->fw_done);
+	mutex_lock(&adapter->fw_lock);
+	adapter->fw_done_rc = 0;
+	reinit_completion(&adapter->fw_done);
+
 	crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
 	crq.get_vpd_size.cmd = GET_VPD_SIZE;
 	rc = ibmvnic_send_crq(adapter, &crq);
-	if (rc)
+	if (rc) {
+		mutex_unlock(&adapter->fw_lock);
 		return rc;
-	wait_for_completion(&adapter->fw_done);
+	}
+
+	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
+	if (rc) {
+		dev_err(dev, "Could not retrieve VPD size, rc = %d\n", rc);
+		mutex_unlock(&adapter->fw_lock);
+		return rc;
+	}
+	mutex_unlock(&adapter->fw_lock);
 
 	if (!adapter->vpd->len)
 		return -ENODATA;
@@ -976,7 +1051,10 @@ static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
 		return -ENOMEM;
 	}
 
+	mutex_lock(&adapter->fw_lock);
+	adapter->fw_done_rc = 0;
 	reinit_completion(&adapter->fw_done);
+
 	crq.get_vpd.first = IBMVNIC_CRQ_CMD;
 	crq.get_vpd.cmd = GET_VPD;
 	crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
@@ -985,10 +1063,20 @@ static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
 	if (rc) {
 		kfree(adapter->vpd->buff);
 		adapter->vpd->buff = NULL;
+		mutex_unlock(&adapter->fw_lock);
 		return rc;
 	}
-	wait_for_completion(&adapter->fw_done);
+
+	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
+	if (rc) {
+		dev_err(dev, "Unable to retrieve VPD, rc = %d\n", rc);
+		kfree(adapter->vpd->buff);
+		adapter->vpd->buff = NULL;
+		mutex_unlock(&adapter->fw_lock);
+		return rc;
+	}
 
+	mutex_unlock(&adapter->fw_lock);
 	return 0;
 }
 
@@ -1689,20 +1777,25 @@ static int __ibmvnic_set_mac(struct net_device *netdev, u8 *dev_addr)
 	crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
 	ether_addr_copy(&crq.change_mac_addr.mac_addr[0], dev_addr);
 
-	init_completion(&adapter->fw_done);
+	mutex_lock(&adapter->fw_lock);
+	adapter->fw_done_rc = 0;
+	reinit_completion(&adapter->fw_done);
 	rc = ibmvnic_send_crq(adapter, &crq);
 	if (rc) {
 		rc = -EIO;
+		mutex_unlock(&adapter->fw_lock);
 		goto err;
 	}
 
-	wait_for_completion(&adapter->fw_done);
+	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
 	/* netdev->dev_addr is changed in handle_change_mac_rsp function */
-	if (adapter->fw_done_rc) {
+	if (rc || adapter->fw_done_rc) {
 		rc = -EIO;
+		mutex_unlock(&adapter->fw_lock);
 		goto err;
 	}
 
+	mutex_unlock(&adapter->fw_lock);
 	return 0;
 err:
 	ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
@@ -2316,12 +2409,19 @@ static int wait_for_reset(struct ibmvnic_adapter *adapter)
 	adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq;
 	adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;
 
-	init_completion(&adapter->reset_done);
+	reinit_completion(&adapter->reset_done);
 	adapter->wait_for_reset = true;
 	rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
-	if (rc)
-		return rc;
-	wait_for_completion(&adapter->reset_done);
+
+	if (rc) {
+		ret = rc;
+		goto out;
+	}
+	rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done, 60000);
+	if (rc) {
+		ret = -ENODEV;
+		goto out;
+	}
 
 	ret = 0;
 	if (adapter->reset_done_rc) {
@@ -2332,13 +2432,21 @@ static int wait_for_reset(struct ibmvnic_adapter *adapter)
 		adapter->desired.rx_entries = adapter->fallback.rx_entries;
 		adapter->desired.tx_entries = adapter->fallback.tx_entries;
 
-		init_completion(&adapter->reset_done);
+		reinit_completion(&adapter->reset_done);
 		adapter->wait_for_reset = true;
 		rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
-		if (rc)
-			return ret;
-		wait_for_completion(&adapter->reset_done);
+		if (rc) {
+			ret = rc;
+			goto out;
+		}
+		rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done,
+						 60000);
+		if (rc) {
+			ret = -ENODEV;
+			goto out;
+		}
 	}
+out:
 	adapter->wait_for_reset = false;
 
 	return ret;
@@ -2603,11 +2711,13 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev,
 		cpu_to_be32(sizeof(struct ibmvnic_statistics));
 
 	/* Wait for data to be written */
-	init_completion(&adapter->stats_done);
+	reinit_completion(&adapter->stats_done);
 	rc = ibmvnic_send_crq(adapter, &crq);
 	if (rc)
 		return;
-	wait_for_completion(&adapter->stats_done);
+	rc = ibmvnic_wait_for_completion(adapter, &adapter->stats_done, 10000);
+	if (rc)
+		return;
 
 	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
 		data[i] = be64_to_cpu(IBMVNIC_GET_STAT(adapter,
@@ -4408,11 +4518,24 @@ static int send_query_phys_parms(struct ibmvnic_adapter *adapter)
 	memset(&crq, 0, sizeof(crq));
 	crq.query_phys_parms.first = IBMVNIC_CRQ_CMD;
 	crq.query_phys_parms.cmd = QUERY_PHYS_PARMS;
-	init_completion(&adapter->fw_done);
+
+	mutex_lock(&adapter->fw_lock);
+	adapter->fw_done_rc = 0;
+	reinit_completion(&adapter->fw_done);
+
 	rc = ibmvnic_send_crq(adapter, &crq);
-	if (rc)
+	if (rc) {
+		mutex_unlock(&adapter->fw_lock);
 		return rc;
-	wait_for_completion(&adapter->fw_done);
+	}
+
+	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
+	if (rc) {
+		mutex_unlock(&adapter->fw_lock);
+		return rc;
+	}
+
+	mutex_unlock(&adapter->fw_lock);
 	return adapter->fw_done_rc ? -EIO : 0;
 }
 
@@ -4505,6 +4628,15 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
 	case IBMVNIC_CRQ_XPORT_EVENT:
 		netif_carrier_off(netdev);
 		adapter->crq.active = false;
+		/* terminate any thread waiting for a response
+		 * from the device
+		 */
+		if (!completion_done(&adapter->fw_done)) {
+			adapter->fw_done_rc = -EIO;
+			complete(&adapter->fw_done);
+		}
+		if (!completion_done(&adapter->stats_done))
+			complete(&adapter->stats_done);
 		if (test_bit(0, &adapter->resetting))
 			adapter->force_reset_recovery = true;
 		if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
@@ -4959,7 +5091,11 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
 			  __ibmvnic_delayed_reset);
 	INIT_LIST_HEAD(&adapter->rwi_list);
 	spin_lock_init(&adapter->rwi_lock);
+	mutex_init(&adapter->fw_lock);
 	init_completion(&adapter->init_done);
+	init_completion(&adapter->fw_done);
+	init_completion(&adapter->reset_done);
+	init_completion(&adapter->stats_done);
 	clear_bit(0, &adapter->resetting);
 
 	do {
@@ -5017,6 +5153,7 @@ ibmvnic_stats_fail:
 ibmvnic_init_fail:
 	release_sub_crqs(adapter, 1);
 	release_crq_queue(adapter);
+	mutex_destroy(&adapter->fw_lock);
 	free_netdev(netdev);
 
 	return rc;
@@ -5041,6 +5178,7 @@ static int ibmvnic_remove(struct vio_dev *dev)
 	adapter->state = VNIC_REMOVED;
 
 	rtnl_unlock();
+	mutex_destroy(&adapter->fw_lock);
 	device_remove_file(&dev->dev, &dev_attr_failover);
 	free_netdev(netdev);
 	dev_set_drvdata(&dev->dev, NULL);
@@ -1026,6 +1026,8 @@ struct ibmvnic_adapter {
 	int init_done_rc;
 
 	struct completion fw_done;
+	/* Used for serialization of device commands */
+	struct mutex fw_lock;
 	int fw_done_rc;
 
 	struct completion reset_done;
@@ -583,18 +583,10 @@ int ocelot_port_add_txtstamp_skb(struct ocelot_port *ocelot_port,
 
 	if (ocelot->ptp && shinfo->tx_flags & SKBTX_HW_TSTAMP &&
 	    ocelot_port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) {
-		struct ocelot_skb *oskb =
-			kzalloc(sizeof(struct ocelot_skb), GFP_ATOMIC);
-
-		if (unlikely(!oskb))
-			return -ENOMEM;
-
 		shinfo->tx_flags |= SKBTX_IN_PROGRESS;
-
-		oskb->skb = skb;
-		oskb->id = ocelot_port->ts_id % 4;
-
-		list_add_tail(&oskb->head, &ocelot_port->skbs);
+		/* Store timestamp ID in cb[0] of sk_buff */
+		skb->cb[0] = ocelot_port->ts_id % 4;
+		skb_queue_tail(&ocelot_port->tx_skbs, skb);
 		return 0;
 	}
 	return -ENODATA;
@@ -704,12 +696,11 @@ void ocelot_get_txtstamp(struct ocelot *ocelot)
 	int budget = OCELOT_PTP_QUEUE_SZ;
 
 	while (budget--) {
+		struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
 		struct skb_shared_hwtstamps shhwtstamps;
-		struct list_head *pos, *tmp;
-		struct sk_buff *skb = NULL;
-		struct ocelot_skb *entry;
 		struct ocelot_port *port;
 		struct timespec64 ts;
+		unsigned long flags;
 		u32 val, id, txport;
 
 		val = ocelot_read(ocelot, SYS_PTP_STATUS);
@@ -727,21 +718,22 @@ void ocelot_get_txtstamp(struct ocelot *ocelot)
 		/* Retrieve its associated skb */
 		port = ocelot->ports[txport];
 
-		list_for_each_safe(pos, tmp, &port->skbs) {
-			entry = list_entry(pos, struct ocelot_skb, head);
-			if (entry->id != id)
-				continue;
-
-			skb = entry->skb;
-			list_del(pos);
-			kfree(entry);
-			break;
-		}
+		spin_lock_irqsave(&port->tx_skbs.lock, flags);
+
+		skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) {
+			if (skb->cb[0] != id)
+				continue;
+			__skb_unlink(skb, &port->tx_skbs);
+			skb_match = skb;
+			break;
+		}
+
+		spin_unlock_irqrestore(&port->tx_skbs.lock, flags);
 
 		/* Next ts */
 		ocelot_write(ocelot, SYS_PTP_NXT_PTP_NXT, SYS_PTP_NXT);
 
-		if (unlikely(!skb))
+		if (unlikely(!skb_match))
 			continue;
 
 		/* Get the h/w timestamp */
@@ -750,9 +742,9 @@ void ocelot_get_txtstamp(struct ocelot *ocelot)
 		/* Set the timestamp into the skb */
 		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
 		shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
-		skb_tstamp_tx(skb, &shhwtstamps);
+		skb_tstamp_tx(skb_match, &shhwtstamps);
 
-		dev_kfree_skb_any(skb);
+		dev_kfree_skb_any(skb_match);
 	}
 }
 EXPORT_SYMBOL(ocelot_get_txtstamp);
@@ -2205,7 +2197,7 @@ void ocelot_init_port(struct ocelot *ocelot, int port)
 {
 	struct ocelot_port *ocelot_port = ocelot->ports[port];
 
-	INIT_LIST_HEAD(&ocelot_port->skbs);
+	skb_queue_head_init(&ocelot_port->tx_skbs);
 
 	/* Basic L2 initialization */
 
@@ -2490,9 +2482,7 @@ EXPORT_SYMBOL(ocelot_init);
 
 void ocelot_deinit(struct ocelot *ocelot)
 {
-	struct list_head *pos, *tmp;
 	struct ocelot_port *port;
-	struct ocelot_skb *entry;
 	int i;
 
 	cancel_delayed_work(&ocelot->stats_work);
@@ -2502,14 +2492,7 @@ void ocelot_deinit(struct ocelot *ocelot)
 
 	for (i = 0; i < ocelot->num_phys_ports; i++) {
 		port = ocelot->ports[i];
-
-		list_for_each_safe(pos, tmp, &port->skbs) {
-			entry = list_entry(pos, struct ocelot_skb, head);
-
-			list_del(pos);
-			dev_kfree_skb_any(entry->skb);
-			kfree(entry);
-		}
+		skb_queue_purge(&port->tx_skbs);
 	}
 }
 EXPORT_SYMBOL(ocelot_deinit);
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * HWMON driver for Aquantia PHY
+/* SPDX-License-Identifier: GPL-2.0 */
+/* HWMON driver for Aquantia PHY
  *
  * Author: Nikita Yushchenko <nikita.yoush@cogentembedded.com>
  * Author: Andrew Lunn <andrew@lunn.ch>
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Copyright (C) 2015 Broadcom Corporation
  */
@@ -151,13 +151,13 @@ static int dp83869_config_port_mirroring(struct phy_device *phydev)
 	struct dp83869_private *dp83869 = phydev->priv;
 
 	if (dp83869->port_mirroring == DP83869_PORT_MIRRORING_EN)
-		phy_set_bits_mmd(phydev, DP83869_DEVADDR, DP83869_GEN_CFG3,
-				 DP83869_CFG3_PORT_MIRROR_EN);
+		return phy_set_bits_mmd(phydev, DP83869_DEVADDR,
+					DP83869_GEN_CFG3,
+					DP83869_CFG3_PORT_MIRROR_EN);
 	else
-		phy_clear_bits_mmd(phydev, DP83869_DEVADDR, DP83869_GEN_CFG3,
-				   DP83869_CFG3_PORT_MIRROR_EN);
-
-	return 0;
+		return phy_clear_bits_mmd(phydev, DP83869_DEVADDR,
+					  DP83869_GEN_CFG3,
+					  DP83869_CFG3_PORT_MIRROR_EN);
 }
 
 #ifdef CONFIG_OF_MDIO
@@ -204,7 +204,7 @@ static int dp83869_of_init(struct phy_device *phydev)
 				 &dp83869->tx_fifo_depth))
 		dp83869->tx_fifo_depth = DP83869_PHYCR_FIFO_DEPTH_4_B_NIB;
 
-	return 0;
+	return ret;
 }
 #else
 static int dp83869_of_init(struct phy_device *phydev)
@@ -216,7 +216,7 @@ static int dp83869_of_init(struct phy_device *phydev)
 static int dp83869_configure_rgmii(struct phy_device *phydev,
 				   struct dp83869_private *dp83869)
 {
-	int ret, val;
+	int ret = 0, val;
 
 	if (phy_interface_is_rgmii(phydev)) {
 		val = phy_read(phydev, MII_DP83869_PHYCTRL);
@@ -233,13 +233,13 @@ static int dp83869_configure_rgmii(struct phy_device *phydev,
 	}
 
 	if (dp83869->io_impedance >= 0)
-		phy_modify_mmd(phydev, DP83869_DEVADDR,
-			       DP83869_IO_MUX_CFG,
-			       DP83869_IO_MUX_CFG_IO_IMPEDANCE_CTRL,
-			       dp83869->io_impedance &
-			       DP83869_IO_MUX_CFG_IO_IMPEDANCE_CTRL);
+		ret = phy_modify_mmd(phydev, DP83869_DEVADDR,
+				     DP83869_IO_MUX_CFG,
+				     DP83869_IO_MUX_CFG_IO_IMPEDANCE_CTRL,
+				     dp83869->io_impedance &
+				     DP83869_IO_MUX_CFG_IO_IMPEDANCE_CTRL);
 
-	return 0;
+	return ret;
 }
 
 static int dp83869_configure_mode(struct phy_device *phydev,
@@ -284,9 +284,11 @@ static int dp83869_configure_mode(struct phy_device *phydev,
 			return ret;
 		break;
 	case DP83869_RGMII_SGMII_BRIDGE:
-		phy_modify_mmd(phydev, DP83869_DEVADDR, DP83869_OP_MODE,
-			       DP83869_SGMII_RGMII_BRIDGE,
-			       DP83869_SGMII_RGMII_BRIDGE);
+		ret = phy_modify_mmd(phydev, DP83869_DEVADDR, DP83869_OP_MODE,
+				     DP83869_SGMII_RGMII_BRIDGE,
+				     DP83869_SGMII_RGMII_BRIDGE);
+		if (ret)
+			return ret;
 
 		ret = phy_write_mmd(phydev, DP83869_DEVADDR,
 				    DP83869_FX_CTRL, DP83869_FX_CTRL_DEFAULT);
@@ -334,7 +336,7 @@ static int dp83869_configure_mode(struct phy_device *phydev,
 		return -EINVAL;
 	};
 
-	return 0;
+	return ret;
 }
 
 static int dp83869_config_init(struct phy_device *phydev)
@@ -358,12 +360,13 @@ static int dp83869_config_init(struct phy_device *phydev)
 
 	/* Clock output selection if muxing property is set */
 	if (dp83869->clk_output_sel != DP83869_CLK_O_SEL_REF_CLK)
-		phy_modify_mmd(phydev, DP83869_DEVADDR, DP83869_IO_MUX_CFG,
-			       DP83869_IO_MUX_CFG_CLK_O_SEL_MASK,
-			       dp83869->clk_output_sel <<
-			       DP83869_IO_MUX_CFG_CLK_O_SEL_SHIFT);
+		ret = phy_modify_mmd(phydev,
+				     DP83869_DEVADDR, DP83869_IO_MUX_CFG,
+				     DP83869_IO_MUX_CFG_CLK_O_SEL_MASK,
+				     dp83869->clk_output_sel <<
+				     DP83869_IO_MUX_CFG_CLK_O_SEL_SHIFT);
 
-	return 0;
+	return ret;
 }
 
 static int dp83869_probe(struct phy_device *phydev)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Copyright (C) 2009-2016 Cavium, Inc.
  */
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * MDIO I2C bridge
  *
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
 /* Applied Micro X-Gene SoC MDIO Driver
  *
  * Copyright (c) 2016, Applied Micro Circuits Corporation
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later
- * Aquantia Corp. Aquantia AQtion USB to 5GbE Controller
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Aquantia Corp. Aquantia AQtion USB to 5GbE Controller
  * Copyright (C) 2003-2005 David Hollis <dhollis@davehollis.com>
  * Copyright (C) 2005 Phil Chang <pchang23@sbcglobal.net>
  * Copyright (C) 2002-2003 TiVo Inc.
@@ -1214,8 +1214,9 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
  * This needs to be a tasklet otherwise we will
  * end up recursively calling this function.
  */
-static void hso_unthrottle_tasklet(struct hso_serial *serial)
+static void hso_unthrottle_tasklet(unsigned long data)
 {
+	struct hso_serial *serial = (struct hso_serial *)data;
 	unsigned long flags;
 
 	spin_lock_irqsave(&serial->serial_lock, flags);
@@ -1265,7 +1266,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
 	/* Force default termio settings */
 	_hso_serial_set_termios(tty, NULL);
 	tasklet_init(&serial->unthrottle_tasklet,
-		     (void (*)(unsigned long))hso_unthrottle_tasklet,
+		     hso_unthrottle_tasklet,
 		     (unsigned long)serial);
 	result = hso_start_serial_device(serial->parent, GFP_KERNEL);
 	if (result) {
@@ -1573,6 +1573,13 @@ static void usbnet_bh (struct timer_list *t)
 	}
 }
 
+static void usbnet_bh_tasklet(unsigned long data)
+{
+	struct timer_list *t = (struct timer_list *)data;
+
+	usbnet_bh(t);
+}
+
 
 /*-------------------------------------------------------------------------
  *
@@ -1700,7 +1707,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
 	skb_queue_head_init (&dev->txq);
 	skb_queue_head_init (&dev->done);
 	skb_queue_head_init(&dev->rxq_pause);
-	dev->bh.func = (void (*)(unsigned long))usbnet_bh;
+	dev->bh.func = usbnet_bh_tasklet;
 	dev->bh.data = (unsigned long)&dev->delay;
 	INIT_WORK (&dev->kevent, usbnet_deferred_kevent);
 	init_usb_anchor(&dev->deferred);
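As an illustration (not part of the diff above), both the hso and usbnet hunks resolve -Wcast-function-type the same way: instead of casting a handler with the wrong prototype at the call site, they add a wrapper whose signature matches the tasklet callback type and cast the argument inside it. A minimal user-space sketch of the idea; all names here are made up for the example.

/* Compile with: gcc -Wcast-function-type demo.c */
#include <stdio.h>

struct widget { const char *name; };

/* Callback type expected by some framework: takes an opaque integer. */
typedef void (*callback_t)(unsigned long data);

/* Wrong prototype for the framework... */
static void handle_widget(struct widget *w)
{
	printf("widget: %s\n", w->name);
}

/* ...so wrap it instead of casting the function pointer. */
static void handle_widget_cb(unsigned long data)
{
	handle_widget((struct widget *)data);
}

int main(void)
{
	struct widget w = { .name = "demo" };

	/* callback_t cb = (callback_t)handle_widget;  triggers -Wcast-function-type */
	callback_t cb = handle_widget_cb;	/* clean: prototypes match */

	cb((unsigned long)&w);
	return 0;
}

Casting the argument inside the wrapper is well-defined; calling through a function pointer cast to a different prototype is not, which is what the warning flags.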
@@ -1421,6 +1421,7 @@ out_err:
 static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct napi_struct *napi;
 	struct iwl_rxq *rxq;
 	u32 r, i, count = 0;
 	bool emergency = false;
@@ -1526,8 +1527,16 @@ out:
 	if (unlikely(emergency && count))
 		iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
 
-	if (rxq->napi.poll)
-		napi_gro_flush(&rxq->napi, false);
+	napi = &rxq->napi;
+	if (napi->poll) {
+		if (napi->rx_count) {
+			netif_receive_skb_list(&napi->rx_list);
+			INIT_LIST_HEAD(&napi->rx_list);
+			napi->rx_count = 0;
+		}
+
+		napi_gro_flush(napi, false);
+	}
 
 	iwl_pcie_rxq_restock(trans, rxq);
 }
@@ -339,7 +339,7 @@ static inline u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_offset)
 void inet_get_local_port_range(struct net *net, int *low, int *high);
 
 #ifdef CONFIG_SYSCTL
-static inline bool inet_is_local_reserved_port(struct net *net, int port)
+static inline bool inet_is_local_reserved_port(struct net *net, unsigned short port)
 {
 	if (!net->ipv4.sysctl_local_reserved_ports)
 		return false;
@@ -351,20 +351,20 @@ static inline bool sysctl_dev_name_is_allowed(const char *name)
 	return strcmp(name, "default") != 0 && strcmp(name, "all") != 0;
 }
 
-static inline int inet_prot_sock(struct net *net)
+static inline bool inet_port_requires_bind_service(struct net *net, unsigned short port)
 {
-	return net->ipv4.sysctl_ip_prot_sock;
+	return port < net->ipv4.sysctl_ip_prot_sock;
 }
 
 #else
-static inline bool inet_is_local_reserved_port(struct net *net, int port)
+static inline bool inet_is_local_reserved_port(struct net *net, unsigned short port)
 {
 	return false;
 }
 
-static inline int inet_prot_sock(struct net *net)
+static inline bool inet_port_requires_bind_service(struct net *net, unsigned short port)
 {
-	return PROT_SOCK;
+	return port < PROT_SOCK;
 }
 #endif
 
@@ -406,13 +406,6 @@ struct ocelot_ops {
 	int (*reset)(struct ocelot *ocelot);
 };
 
-struct ocelot_skb {
-	struct list_head head;
-	struct sk_buff *skb;
-	u8 id;
-};
-
-
 struct ocelot_port {
 	struct ocelot *ocelot;
 
@@ -425,7 +418,7 @@ struct ocelot_port {
 	u16 vid;
 
 	u8 ptp_cmd;
-	struct list_head skbs;
+	struct sk_buff_head tx_skbs;
 	u8 ts_id;
 };
 
@@ -495,7 +495,7 @@ int __inet_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
 
 	snum = ntohs(addr->sin_port);
 	err = -EACCES;
-	if (snum && snum < inet_prot_sock(net) &&
+	if (snum && inet_port_requires_bind_service(net, snum) &&
 	    !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
 		goto out;
 
@@ -292,7 +292,7 @@ static int __inet6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
 		return -EINVAL;
 
 	snum = ntohs(addr->sin6_port);
-	if (snum && snum < inet_prot_sock(net) &&
+	if (snum && inet_port_requires_bind_service(net, snum) &&
 	    !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
 		return -EACCES;
 
@@ -423,7 +423,7 @@ ip_vs_service_find(struct netns_ipvs *ipvs, int af, __u32 fwmark, __u16 protocol
 
 	if (!svc && protocol == IPPROTO_TCP &&
 	    atomic_read(&ipvs->ftpsvc_counter) &&
-	    (vport == FTPDATA || ntohs(vport) >= inet_prot_sock(ipvs->net))) {
+	    (vport == FTPDATA || !inet_port_requires_bind_service(ipvs->net, ntohs(vport)))) {
 		/*
 		 * Check if ftp service entry exists, the packet
 		 * might belong to FTP data connections.
@@ -723,9 +723,13 @@ static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts,
 {
 	size_t len = NLMSG_ALIGN(sizeof(struct ovs_header));
 
-	/* OVS_FLOW_ATTR_UFID */
+	/* OVS_FLOW_ATTR_UFID, or unmasked flow key as fallback
+	 * see ovs_nla_put_identifier()
+	 */
 	if (sfid && ovs_identifier_is_ufid(sfid))
 		len += nla_total_size(sfid->ufid_len);
+	else
+		len += nla_total_size(ovs_key_attr_size());
 
 	/* OVS_FLOW_ATTR_KEY */
 	if (!sfid || should_fill_key(sfid, ufid_flags))
@@ -229,7 +229,7 @@ void psample_sample_packet(struct psample_group *group, struct sk_buff *skb,
 		data_len = PSAMPLE_MAX_PACKET_SIZE - meta_len - NLA_HDRLEN
 			    - NLA_ALIGNTO;
 
-	nl_skb = genlmsg_new(meta_len + data_len, GFP_ATOMIC);
+	nl_skb = genlmsg_new(meta_len + nla_total_size(data_len), GFP_ATOMIC);
 	if (unlikely(!nl_skb))
 		return;
 
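As an aside (not part of the diff above), the psample fix works because nla_total_size() accounts for the netlink attribute header and padding, not just the payload, so sizing the genetlink buffer with the bare data_len left no room for that overhead. A small user-space sketch of the arithmetic, with the macros mirrored from include/uapi/linux/netlink.h and a made-up packet length:

#include <stdio.h>

struct nlattr {
	unsigned short nla_len;
	unsigned short nla_type;
};

/* Mirrored from include/uapi/linux/netlink.h */
#define NLA_ALIGNTO	4
#define NLA_ALIGN(len)	(((len) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))
#define NLA_HDRLEN	((int)NLA_ALIGN(sizeof(struct nlattr)))

/* Space one attribute occupies in a message: header + padded payload. */
static int nla_total_size(int payload)
{
	return NLA_ALIGN(NLA_HDRLEN + payload);
}

int main(void)
{
	int data_len = 1418;	/* hypothetical sampled packet length */

	printf("payload only:     %d bytes\n", data_len);
	printf("nla_total_size(): %d bytes\n", nla_total_size(data_len));
	/* The missing header-plus-padding bytes per attribute are what
	 * overflowed the undersized buffer (skb_over_panic). */
	return 0;
}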
@@ -384,7 +384,7 @@ static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
 		}
 	}
 
-	if (snum && snum < inet_prot_sock(net) &&
+	if (snum && inet_port_requires_bind_service(net, snum) &&
 	    !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
 		return -EACCES;
 
@@ -1061,7 +1061,7 @@ static int sctp_connect_new_asoc(struct sctp_endpoint *ep,
 		if (sctp_autobind(sk))
 			return -EAGAIN;
 	} else {
-		if (ep->base.bind_addr.port < inet_prot_sock(net) &&
+		if (inet_port_requires_bind_service(net, ep->base.bind_addr.port) &&
 		    !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
 			return -EACCES;
 	}
@@ -8267,6 +8267,7 @@ static int sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
 	struct sctp_sock *sp = sctp_sk(sk);
 	bool reuse = (sk->sk_reuse || sp->reuse);
 	struct sctp_bind_hashbucket *head; /* hash list */
+	struct net *net = sock_net(sk);
 	kuid_t uid = sock_i_uid(sk);
 	struct sctp_bind_bucket *pp;
 	unsigned short snum;
@@ -8282,7 +8283,6 @@ static int sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
 		/* Search for an available port. */
 		int low, high, remaining, index;
 		unsigned int rover;
-		struct net *net = sock_net(sk);
 
 		inet_get_local_port_range(net, &low, &high);
 		remaining = (high - low) + 1;
@@ -8294,12 +8294,12 @@ static int sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
 				rover = low;
 			if (inet_is_local_reserved_port(net, rover))
 				continue;
-			index = sctp_phashfn(sock_net(sk), rover);
+			index = sctp_phashfn(net, rover);
 			head = &sctp_port_hashtable[index];
 			spin_lock(&head->lock);
 			sctp_for_each_hentry(pp, &head->chain)
 				if ((pp->port == rover) &&
-				    net_eq(sock_net(sk), pp->net))
+				    net_eq(net, pp->net))
 					goto next;
 			break;
 		next:
@@ -8323,10 +8323,10 @@ static int sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
 		 * to the port number (snum) - we detect that with the
 		 * port iterator, pp being NULL.
 		 */
-		head = &sctp_port_hashtable[sctp_phashfn(sock_net(sk), snum)];
+		head = &sctp_port_hashtable[sctp_phashfn(net, snum)];
 		spin_lock(&head->lock);
 		sctp_for_each_hentry(pp, &head->chain) {
-			if ((pp->port == snum) && net_eq(pp->net, sock_net(sk)))
+			if ((pp->port == snum) && net_eq(pp->net, net))
 				goto pp_found;
 		}
 	}
@@ -8382,7 +8382,7 @@ pp_found:
 pp_not_found:
 	/* If there was a hash table miss, create a new port. */
 	ret = 1;
-	if (!pp && !(pp = sctp_bucket_create(head, sock_net(sk), snum)))
+	if (!pp && !(pp = sctp_bucket_create(head, net, snum)))
 		goto fail_unlock;
 
 	/* In either case (hit or miss), make sure fastreuse is 1 only
@@ -570,7 +570,7 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg,
 	if (len <= 0)
 		return -EINVAL;
 
-	len = min_t(int, len, TIPC_MAX_BEARER_NAME);
+	len = min_t(int, len, TIPC_MAX_LINK_NAME);
 	if (!string_is_valid(name, len))
 		return -EINVAL;
 
@@ -842,7 +842,7 @@ static int tipc_nl_compat_link_reset_stats(struct tipc_nl_compat_cmd_doit *cmd,
 	if (len <= 0)
 		return -EINVAL;
 
-	len = min_t(int, len, TIPC_MAX_BEARER_NAME);
+	len = min_t(int, len, TIPC_MAX_LINK_NAME);
 	if (!string_is_valid(name, len))
 		return -EINVAL;
 
@@ -4623,8 +4623,8 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, int addrlen)
 
 		inet_get_local_port_range(sock_net(sk), &low, &high);
 
-		if (snum < max(inet_prot_sock(sock_net(sk)), low) ||
-		    snum > high) {
+		if (inet_port_requires_bind_service(sock_net(sk), snum) ||
+		    snum < low || snum > high) {
 			err = sel_netport_sid(sk->sk_protocol,
 					      snum, &sid);
 			if (err)