Merge branch 'iucv-next'
Julian Wiedmann says:

====================
net/iucv: updates 2020-05-19

Please apply the following patch series for iucv to netdev's net-next tree.

s390 dropped its support for power management; this removes the relevant
iucv code. Also, some easy cleanups I found mouldering in an old branch.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 85bac6a52f
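For context on what the series deletes: before this merge, both af_iucv.c and iucv.c carried power-management plumbing built on the kernel's dev_pm_ops mechanism, with a dummy device registered purely so the PM core would invoke the callbacks on suspend/resume. A minimal sketch of that pattern (illustrative only; all names here are hypothetical, not the original code):

	#include <linux/device.h>
	#include <linux/module.h>
	#include <linux/pm.h>

	/* Hibernation hooks: the PM core calls these for every device
	 * bound to a driver that publishes them via dev_pm_ops. */
	static int example_pm_freeze(struct device *dev)
	{
		/* quiesce activity before the hibernation image is written */
		return 0;
	}

	static int example_pm_restore(struct device *dev)
	{
		/* rebuild state after resuming from the image */
		return 0;
	}

	static const struct dev_pm_ops example_pm_ops = {
		.freeze	 = example_pm_freeze,
		.restore = example_pm_restore,
	};

	/* A subsystem with no real device of its own would register a
	 * dummy device/driver pair just to receive these callbacks. */
	static struct device_driver example_driver = {
		.owner	= THIS_MODULE,
		.name	= "example",
		.pm	= &example_pm_ops,
	};

With s390 no longer supporting suspend/resume, every instance of this wiring in the IUCV code is dead, and the diffs below remove it wholesale.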
include/net/iucv/af_iucv.h

@@ -158,12 +158,4 @@ struct iucv_sock_list {
 	atomic_t	  autobind_name;
 };
 
-__poll_t iucv_sock_poll(struct file *file, struct socket *sock,
-			poll_table *wait);
-void iucv_sock_link(struct iucv_sock_list *l, struct sock *s);
-void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *s);
-void iucv_accept_enqueue(struct sock *parent, struct sock *sk);
-void iucv_accept_unlink(struct sock *sk);
-struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock);
-
 #endif	/* __IUCV_H */
net/iucv/af_iucv.c

@@ -16,6 +16,7 @@
 #include <linux/module.h>
 #include <linux/netdevice.h>
 #include <linux/types.h>
+#include <linux/limits.h>
 #include <linux/list.h>
 #include <linux/errno.h>
 #include <linux/kernel.h>
@@ -36,8 +37,6 @@
 
 static char iucv_userid[80];
 
-static const struct proto_ops iucv_sock_ops;
-
 static struct proto iucv_proto = {
 	.name		= "AF_IUCV",
 	.owner		= THIS_MODULE,
@@ -85,14 +84,11 @@ do { \
 	__ret;								\
 })
 
+static struct sock *iucv_accept_dequeue(struct sock *parent,
+					struct socket *newsock);
 static void iucv_sock_kill(struct sock *sk);
 static void iucv_sock_close(struct sock *sk);
-static void iucv_sever_path(struct sock *, int);
 
-static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
-	struct packet_type *pt, struct net_device *orig_dev);
-static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
-		   struct sk_buff *skb, u8 flags);
 static void afiucv_hs_callback_txnotify(struct sk_buff *, enum iucv_tx_notify);
 
 /* Call Back functions */
@@ -127,110 +123,6 @@ static inline void low_nmcpy(unsigned char *dst, char *src)
 	memcpy(&dst[8], src, 8);
 }
 
-static int afiucv_pm_prepare(struct device *dev)
-{
-#ifdef CONFIG_PM_DEBUG
-	printk(KERN_WARNING "afiucv_pm_prepare\n");
-#endif
-	return 0;
-}
-
-static void afiucv_pm_complete(struct device *dev)
-{
-#ifdef CONFIG_PM_DEBUG
-	printk(KERN_WARNING "afiucv_pm_complete\n");
-#endif
-}
-
-/**
- * afiucv_pm_freeze() - Freeze PM callback
- * @dev: AFIUCV dummy device
- *
- * Sever all established IUCV communication pathes
- */
-static int afiucv_pm_freeze(struct device *dev)
-{
-	struct iucv_sock *iucv;
-	struct sock *sk;
-
-#ifdef CONFIG_PM_DEBUG
-	printk(KERN_WARNING "afiucv_pm_freeze\n");
-#endif
-	read_lock(&iucv_sk_list.lock);
-	sk_for_each(sk, &iucv_sk_list.head) {
-		iucv = iucv_sk(sk);
-		switch (sk->sk_state) {
-		case IUCV_DISCONN:
-		case IUCV_CLOSING:
-		case IUCV_CONNECTED:
-			iucv_sever_path(sk, 0);
-			break;
-		case IUCV_OPEN:
-		case IUCV_BOUND:
-		case IUCV_LISTEN:
-		case IUCV_CLOSED:
-		default:
-			break;
-		}
-		skb_queue_purge(&iucv->send_skb_q);
-		skb_queue_purge(&iucv->backlog_skb_q);
-	}
-	read_unlock(&iucv_sk_list.lock);
-	return 0;
-}
-
-/**
- * afiucv_pm_restore_thaw() - Thaw and restore PM callback
- * @dev: AFIUCV dummy device
- *
- * socket clean up after freeze
- */
-static int afiucv_pm_restore_thaw(struct device *dev)
-{
-	struct sock *sk;
-
-#ifdef CONFIG_PM_DEBUG
-	printk(KERN_WARNING "afiucv_pm_restore_thaw\n");
-#endif
-	read_lock(&iucv_sk_list.lock);
-	sk_for_each(sk, &iucv_sk_list.head) {
-		switch (sk->sk_state) {
-		case IUCV_CONNECTED:
-			sk->sk_err = EPIPE;
-			sk->sk_state = IUCV_DISCONN;
-			sk->sk_state_change(sk);
-			break;
-		case IUCV_DISCONN:
-		case IUCV_CLOSING:
-		case IUCV_LISTEN:
-		case IUCV_BOUND:
-		case IUCV_OPEN:
-		default:
-			break;
-		}
-	}
-	read_unlock(&iucv_sk_list.lock);
-	return 0;
-}
-
-static const struct dev_pm_ops afiucv_pm_ops = {
-	.prepare = afiucv_pm_prepare,
-	.complete = afiucv_pm_complete,
-	.freeze = afiucv_pm_freeze,
-	.thaw = afiucv_pm_restore_thaw,
-	.restore = afiucv_pm_restore_thaw,
-};
-
-static struct device_driver af_iucv_driver = {
-	.owner = THIS_MODULE,
-	.name = "afiucv",
-	.bus  = NULL,
-	.pm   = &afiucv_pm_ops,
-};
-
-/* dummy device used as trigger for PM functions */
-static struct device *af_iucv_dev;
-
 /**
  * iucv_msg_length() - Returns the length of an iucv message.
  * @msg: Pointer to struct iucv_message, MUST NOT be NULL
@@ -435,6 +327,20 @@ static void iucv_sock_cleanup_listen(struct sock *parent)
 	parent->sk_state = IUCV_CLOSED;
 }
 
+static void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
+{
+	write_lock_bh(&l->lock);
+	sk_add_node(sk, &l->head);
+	write_unlock_bh(&l->lock);
+}
+
+static void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
+{
+	write_lock_bh(&l->lock);
+	sk_del_node_init(sk);
+	write_unlock_bh(&l->lock);
+}
+
 /* Kill socket (only if zapped and orphaned) */
 static void iucv_sock_kill(struct sock *sk)
 {
@@ -607,53 +513,7 @@ static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio,
 	return sk;
 }
 
-/* Create an IUCV socket */
-static int iucv_sock_create(struct net *net, struct socket *sock, int protocol,
-			    int kern)
-{
-	struct sock *sk;
-
-	if (protocol && protocol != PF_IUCV)
-		return -EPROTONOSUPPORT;
-
-	sock->state = SS_UNCONNECTED;
-
-	switch (sock->type) {
-	case SOCK_STREAM:
-		sock->ops = &iucv_sock_ops;
-		break;
-	case SOCK_SEQPACKET:
-		/* currently, proto ops can handle both sk types */
-		sock->ops = &iucv_sock_ops;
-		break;
-	default:
-		return -ESOCKTNOSUPPORT;
-	}
-
-	sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL, kern);
-	if (!sk)
-		return -ENOMEM;
-
-	iucv_sock_init(sk, NULL);
-
-	return 0;
-}
-
-void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
-{
-	write_lock_bh(&l->lock);
-	sk_add_node(sk, &l->head);
-	write_unlock_bh(&l->lock);
-}
-
-void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
-{
-	write_lock_bh(&l->lock);
-	sk_del_node_init(sk);
-	write_unlock_bh(&l->lock);
-}
-
-void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
+static void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
 {
 	unsigned long flags;
 	struct iucv_sock *par = iucv_sk(parent);
@@ -666,7 +526,7 @@ void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
 	sk_acceptq_added(parent);
 }
 
-void iucv_accept_unlink(struct sock *sk)
+static void iucv_accept_unlink(struct sock *sk)
 {
 	unsigned long flags;
 	struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);
@@ -679,7 +539,8 @@ void iucv_accept_unlink(struct sock *sk)
 	sock_put(sk);
 }
 
-struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
+static struct sock *iucv_accept_dequeue(struct sock *parent,
+					struct socket *newsock)
 {
 	struct iucv_sock *isk, *n;
 	struct sock *sk;
@@ -1100,7 +961,6 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
 
 	/* initialize defaults */
 	cmsg_done   = 0;	/* check for duplicate headers */
-	txmsg.class = 0;
 
 	/* iterate over control messages */
 	for_each_cmsghdr(cmsg, msg) {
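The zeroing removed here is redundant on the assumption (not visible in this hunk) that txmsg is zero-initialized at its declaration: in C, an aggregate initializer such as `= {0}` already clears every member, so a later `txmsg.class = 0;` is a no-op. A standalone illustration with a stand-in struct:

	#include <assert.h>

	struct iucv_message_like {	/* stand-in, not the real struct */
		unsigned int id;
		unsigned int audit;
		unsigned int class;
		unsigned int length;
	};

	int main(void)
	{
		struct iucv_message_like txmsg = {0};	/* all members become 0 */

		assert(txmsg.class == 0);	/* explicit txmsg.class = 0 adds nothing */
		return 0;
	}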
@@ -1511,8 +1371,8 @@ static inline __poll_t iucv_accept_poll(struct sock *parent)
 	return 0;
 }
 
-__poll_t iucv_sock_poll(struct file *file, struct socket *sock,
-			poll_table *wait)
+static __poll_t iucv_sock_poll(struct file *file, struct socket *sock,
+			       poll_table *wait)
 {
 	struct sock *sk = sock->sk;
 	__poll_t mask = 0;
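This conversion, like the others above, relies on a basic linkage fact: a function needs external linkage only if another translation unit refers to it by name. iucv_sock_poll is reached solely through a function-pointer table (iucv_sock_ops), so it can be static and still be called from anywhere the table is visible. A standalone sketch of the idiom, with hypothetical names:

	#include <stdio.h>

	struct ops {
		int (*poll)(void);
	};

	/* static: the symbol is local to this file... */
	static int my_poll(void)
	{
		return 42;
	}

	/* ...but still reachable from other files through this table */
	const struct ops my_ops = { .poll = my_poll };

	int main(void)
	{
		printf("%d\n", my_ops.poll());	/* prints 42 */
		return 0;
	}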
@@ -1664,7 +1524,7 @@ static int iucv_sock_setsockopt(struct socket *sock, int level, int optname,
 	switch (sk->sk_state) {
 	case IUCV_OPEN:
 	case IUCV_BOUND:
-		if (val < 1 || val > (u16)(~0))
+		if (val < 1 || val > U16_MAX)
 			rc = -EINVAL;
 		else
 			iucv->msglimit = val;
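U16_MAX is supplied by the <linux/limits.h> include added at the top of this file; the kernel defines it as ((u16)~0U), so the substitution is purely cosmetic. A userspace check of the equivalence, with a typedef standing in for the kernel's u16:

	#include <assert.h>

	typedef unsigned short u16;	/* stand-in for the kernel's u16 */
	#define U16_MAX ((u16)~0U)	/* as defined in <linux/limits.h> */

	int main(void)
	{
		assert(U16_MAX == (u16)(~0));	/* both evaluate to 65535 */
		assert(U16_MAX == 65535);
		return 0;
	}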
@@ -2396,6 +2256,35 @@ static const struct proto_ops iucv_sock_ops = {
 	.getsockopt	= iucv_sock_getsockopt,
 };
 
+static int iucv_sock_create(struct net *net, struct socket *sock, int protocol,
+			    int kern)
+{
+	struct sock *sk;
+
+	if (protocol && protocol != PF_IUCV)
+		return -EPROTONOSUPPORT;
+
+	sock->state = SS_UNCONNECTED;
+
+	switch (sock->type) {
+	case SOCK_STREAM:
+	case SOCK_SEQPACKET:
+		/* currently, proto ops can handle both sk types */
+		sock->ops = &iucv_sock_ops;
+		break;
+	default:
+		return -ESOCKTNOSUPPORT;
+	}
+
+	sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL, kern);
+	if (!sk)
+		return -ENOMEM;
+
+	iucv_sock_init(sk, NULL);
+
+	return 0;
+}
+
 static const struct net_proto_family iucv_sock_family_ops = {
 	.family	= AF_IUCV,
 	.owner	= THIS_MODULE,
@@ -2409,45 +2298,11 @@ static struct packet_type iucv_packet_type = {
 
 static int afiucv_iucv_init(void)
 {
-	int err;
-
-	err = pr_iucv->iucv_register(&af_iucv_handler, 0);
-	if (err)
-		goto out;
-	/* establish dummy device */
-	af_iucv_driver.bus = pr_iucv->bus;
-	err = driver_register(&af_iucv_driver);
-	if (err)
-		goto out_iucv;
-	af_iucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
-	if (!af_iucv_dev) {
-		err = -ENOMEM;
-		goto out_driver;
-	}
-	dev_set_name(af_iucv_dev, "af_iucv");
-	af_iucv_dev->bus = pr_iucv->bus;
-	af_iucv_dev->parent = pr_iucv->root;
-	af_iucv_dev->release = (void (*)(struct device *))kfree;
-	af_iucv_dev->driver = &af_iucv_driver;
-	err = device_register(af_iucv_dev);
-	if (err)
-		goto out_iucv_dev;
-	return 0;
-
-out_iucv_dev:
-	put_device(af_iucv_dev);
-out_driver:
-	driver_unregister(&af_iucv_driver);
-out_iucv:
-	pr_iucv->iucv_unregister(&af_iucv_handler, 0);
-out:
-	return err;
+	return pr_iucv->iucv_register(&af_iucv_handler, 0);
 }
 
 static void afiucv_iucv_exit(void)
 {
-	device_unregister(af_iucv_dev);
-	driver_unregister(&af_iucv_driver);
 	pr_iucv->iucv_unregister(&af_iucv_handler, 0);
 }
 
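Once the dummy device, its driver, and the PM ops are gone, afiucv_iucv_init() has only one step left, so the staged goto-unwind error handling above collapses into a single return. The general shape of that simplification, with hypothetical step functions:

	int step_a(void);	/* e.g. register a handler */
	int step_b(void);	/* e.g. register a device  */
	void undo_a(void);

	/* Before: each later failure must unwind the earlier steps. */
	int setup_two_steps(void)
	{
		int err;

		err = step_a();
		if (err)
			goto out;
		err = step_b();
		if (err)
			goto out_a;
		return 0;

	out_a:
		undo_a();
	out:
		return err;
	}

	/* After: with step_b() removed there is nothing to unwind,
	 * so the function reduces to the first call's result. */
	int setup_one_step(void)
	{
		return step_a();
	}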
net/iucv/iucv.c (188 changed lines)
@@ -67,32 +67,9 @@ static int iucv_bus_match(struct device *dev, struct device_driver *drv)
 	return 0;
 }
 
-enum iucv_pm_states {
-	IUCV_PM_INITIAL = 0,
-	IUCV_PM_FREEZING = 1,
-	IUCV_PM_THAWING = 2,
-	IUCV_PM_RESTORING = 3,
-};
-static enum iucv_pm_states iucv_pm_state;
-
-static int iucv_pm_prepare(struct device *);
-static void iucv_pm_complete(struct device *);
-static int iucv_pm_freeze(struct device *);
-static int iucv_pm_thaw(struct device *);
-static int iucv_pm_restore(struct device *);
-
-static const struct dev_pm_ops iucv_pm_ops = {
-	.prepare = iucv_pm_prepare,
-	.complete = iucv_pm_complete,
-	.freeze = iucv_pm_freeze,
-	.thaw = iucv_pm_thaw,
-	.restore = iucv_pm_restore,
-};
-
 struct bus_type iucv_bus = {
 	.name = "iucv",
 	.match = iucv_bus_match,
-	.pm = &iucv_pm_ops,
 };
 EXPORT_SYMBOL(iucv_bus);
 
@@ -434,31 +411,6 @@ static void iucv_block_cpu(void *data)
 	cpumask_clear_cpu(cpu, &iucv_irq_cpumask);
 }
 
-/**
- * iucv_block_cpu_almost
- * @data: unused
- *
- * Allow connection-severed interrupts only on this cpu.
- */
-static void iucv_block_cpu_almost(void *data)
-{
-	int cpu = smp_processor_id();
-	union iucv_param *parm;
-
-	/* Allow iucv control interrupts only */
-	parm = iucv_param_irq[cpu];
-	memset(parm, 0, sizeof(union iucv_param));
-	parm->set_mask.ipmask = 0x08;
-	iucv_call_b2f0(IUCV_SETMASK, parm);
-	/* Allow iucv-severed interrupt only */
-	memset(parm, 0, sizeof(union iucv_param));
-	parm->set_mask.ipmask = 0x20;
-	iucv_call_b2f0(IUCV_SETCONTROLMASK, parm);
-
-	/* Clear indication that iucv interrupts are allowed for this cpu. */
-	cpumask_clear_cpu(cpu, &iucv_irq_cpumask);
-}
-
 /**
  * iucv_declare_cpu
  * @data: unused
@@ -1834,146 +1786,6 @@ static void iucv_external_interrupt(struct ext_code ext_code,
 	spin_unlock(&iucv_queue_lock);
 }
 
-static int iucv_pm_prepare(struct device *dev)
-{
-	int rc = 0;
-
-#ifdef CONFIG_PM_DEBUG
-	printk(KERN_INFO "iucv_pm_prepare\n");
-#endif
-	if (dev->driver && dev->driver->pm && dev->driver->pm->prepare)
-		rc = dev->driver->pm->prepare(dev);
-	return rc;
-}
-
-static void iucv_pm_complete(struct device *dev)
-{
-#ifdef CONFIG_PM_DEBUG
-	printk(KERN_INFO "iucv_pm_complete\n");
-#endif
-	if (dev->driver && dev->driver->pm && dev->driver->pm->complete)
-		dev->driver->pm->complete(dev);
-}
-
-/**
- * iucv_path_table_empty() - determine if iucv path table is empty
- *
- * Returns 0 if there are still iucv pathes defined
- *	   1 if there are no iucv pathes defined
- */
-static int iucv_path_table_empty(void)
-{
-	int i;
-
-	for (i = 0; i < iucv_max_pathid; i++) {
-		if (iucv_path_table[i])
-			return 0;
-	}
-	return 1;
-}
-
-/**
- * iucv_pm_freeze() - Freeze PM callback
- * @dev: iucv-based device
- *
- * disable iucv interrupts
- * invoke callback function of the iucv-based driver
- * shut down iucv, if no iucv-pathes are established anymore
- */
-static int iucv_pm_freeze(struct device *dev)
-{
-	int cpu;
-	struct iucv_irq_list *p, *n;
-	int rc = 0;
-
-#ifdef CONFIG_PM_DEBUG
-	printk(KERN_WARNING "iucv_pm_freeze\n");
-#endif
-	if (iucv_pm_state != IUCV_PM_FREEZING) {
-		for_each_cpu(cpu, &iucv_irq_cpumask)
-			smp_call_function_single(cpu, iucv_block_cpu_almost,
-						 NULL, 1);
-		cancel_work_sync(&iucv_work);
-		list_for_each_entry_safe(p, n, &iucv_work_queue, list) {
-			list_del_init(&p->list);
-			iucv_sever_pathid(p->data.ippathid,
-					  iucv_error_no_listener);
-			kfree(p);
-		}
-	}
-	iucv_pm_state = IUCV_PM_FREEZING;
-	if (dev->driver && dev->driver->pm && dev->driver->pm->freeze)
-		rc = dev->driver->pm->freeze(dev);
-	if (iucv_path_table_empty())
-		iucv_disable();
-	return rc;
-}
-
-/**
- * iucv_pm_thaw() - Thaw PM callback
- * @dev: iucv-based device
- *
- * make iucv ready for use again: allocate path table, declare interrupt buffers
- * and enable iucv interrupts
- * invoke callback function of the iucv-based driver
- */
-static int iucv_pm_thaw(struct device *dev)
-{
-	int rc = 0;
-
-#ifdef CONFIG_PM_DEBUG
-	printk(KERN_WARNING "iucv_pm_thaw\n");
-#endif
-	iucv_pm_state = IUCV_PM_THAWING;
-	if (!iucv_path_table) {
-		rc = iucv_enable();
-		if (rc)
-			goto out;
-	}
-	if (cpumask_empty(&iucv_irq_cpumask)) {
-		if (iucv_nonsmp_handler)
-			/* enable interrupts on one cpu */
-			iucv_allow_cpu(NULL);
-		else
-			/* enable interrupts on all cpus */
-			iucv_setmask_mp();
-	}
-	if (dev->driver && dev->driver->pm && dev->driver->pm->thaw)
-		rc = dev->driver->pm->thaw(dev);
-out:
-	return rc;
-}
-
-/**
- * iucv_pm_restore() - Restore PM callback
- * @dev: iucv-based device
- *
- * make iucv ready for use again: allocate path table, declare interrupt buffers
- * and enable iucv interrupts
- * invoke callback function of the iucv-based driver
- */
-static int iucv_pm_restore(struct device *dev)
-{
-	int rc = 0;
-
-#ifdef CONFIG_PM_DEBUG
-	printk(KERN_WARNING "iucv_pm_restore %p\n", iucv_path_table);
-#endif
-	if ((iucv_pm_state != IUCV_PM_RESTORING) && iucv_path_table)
-		pr_warn("Suspending Linux did not completely close all IUCV connections\n");
-	iucv_pm_state = IUCV_PM_RESTORING;
-	if (cpumask_empty(&iucv_irq_cpumask)) {
-		rc = iucv_query_maxconn();
-		rc = iucv_enable();
-		if (rc)
-			goto out;
-	}
-	if (dev->driver && dev->driver->pm && dev->driver->pm->restore)
-		rc = dev->driver->pm->restore(dev);
-out:
-	return rc;
-}
-
 struct iucv_interface iucv_if = {
 	.message_receive = iucv_message_receive,
 	.__message_receive = __iucv_message_receive,