/*
 *  IUCV protocol stack for Linux on zSeries
 *
 *  Copyright IBM Corp. 2006, 2009
 *
 *  Author(s):	Jennifer Hunt <jenhunt@us.ibm.com>
 *		Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 *  PM functions:
 *		Ursula Braun <ursula.braun@de.ibm.com>
 */

#define KMSG_COMPONENT "af_iucv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/security.h>
#include <net/sock.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <linux/kmod.h>

#include <net/iucv/af_iucv.h>

#define VERSION "1.2"

static char iucv_userid[80];

static const struct proto_ops iucv_sock_ops;

static struct proto iucv_proto = {
	.name		= "AF_IUCV",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct iucv_sock),
};

static struct iucv_interface *pr_iucv;

/* special AF_IUCV IPRM messages */
static const u8 iprm_shutdown[8] =
	{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};

#define TRGCLS_SIZE	(sizeof(((struct iucv_message *)0)->class))
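
/*
 * TRGCLS_SIZE is the size of the message target class field of
 * struct iucv_message; the class is exchanged with user space as
 * SCM_IUCV_TRGCLS ancillary data in iucv_sock_sendmsg() and
 * iucv_sock_recvmsg() below.
 */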

#define __iucv_sock_wait(sk, condition, timeo, ret)			\
do {									\
	DEFINE_WAIT(__wait);						\
	long __timeo = timeo;						\
	ret = 0;							\
	prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE);	\
	while (!(condition)) {						\
		if (!__timeo) {						\
			ret = -EAGAIN;					\
			break;						\
		}							\
		if (signal_pending(current)) {				\
			ret = sock_intr_errno(__timeo);			\
			break;						\
		}							\
		release_sock(sk);					\
		__timeo = schedule_timeout(__timeo);			\
		lock_sock(sk);						\
		ret = sock_error(sk);					\
		if (ret)						\
			break;						\
	}								\
	finish_wait(sk_sleep(sk), &__wait);				\
} while (0)

#define iucv_sock_wait(sk, condition, timeo)				\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__iucv_sock_wait(sk, condition, timeo, __ret);		\
	__ret;								\
})
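
/*
 * iucv_sock_wait() sleeps with the socket lock held until @condition becomes
 * true, the timeout expires, a signal arrives, or a socket error is raised;
 * the lock is dropped around each schedule_timeout().  Typical use, as in
 * iucv_sock_connect() below:
 *
 *	err = iucv_sock_wait(sk,
 *			     iucv_sock_in_state(sk, IUCV_CONNECTED, IUCV_DISCONN),
 *			     sock_sndtimeo(sk, flags & O_NONBLOCK));
 */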

static void iucv_sock_kill(struct sock *sk);
static void iucv_sock_close(struct sock *sk);
static void iucv_sever_path(struct sock *, int);

static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
			 struct packet_type *pt, struct net_device *orig_dev);
static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
			  struct sk_buff *skb, u8 flags);
static void afiucv_hs_callback_txnotify(struct sk_buff *, enum iucv_tx_notify);

/* Call Back functions */
static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
static void iucv_callback_connack(struct iucv_path *, u8 *);
static int iucv_callback_connreq(struct iucv_path *, u8 *, u8 *);
static void iucv_callback_connrej(struct iucv_path *, u8 *);
static void iucv_callback_shutdown(struct iucv_path *, u8 *);

static struct iucv_sock_list iucv_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock),
	.autobind_name = ATOMIC_INIT(0)
};

static struct iucv_handler af_iucv_handler = {
	.path_pending	  = iucv_callback_connreq,
	.path_complete	  = iucv_callback_connack,
	.path_severed	  = iucv_callback_connrej,
	.message_pending  = iucv_callback_rx,
	.message_complete = iucv_callback_txdone,
	.path_quiesced	  = iucv_callback_shutdown,
};

static inline void high_nmcpy(unsigned char *dst, char *src)
{
	memcpy(dst, src, 8);
}

static inline void low_nmcpy(unsigned char *dst, char *src)
{
	memcpy(&dst[8], src, 8);
}
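
/*
 * high_nmcpy()/low_nmcpy() fill the two 8-byte halves of the 16-byte IUCV
 * user data that accompanies path control verbs, e.g. in
 * afiucv_path_connect():
 *
 *	high_nmcpy(user_data, sa->siucv_name);	// user_data[0..7]:  peer name
 *	low_nmcpy(user_data, iucv->src_name);	// user_data[8..15]: own name
 *	ASCEBC(user_data, sizeof(user_data));	// convert to EBCDIC
 */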

static int afiucv_pm_prepare(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_prepare\n");
#endif
	return 0;
}

static void afiucv_pm_complete(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_complete\n");
#endif
}
/**
 * afiucv_pm_freeze() - Freeze PM callback
 * @dev:	AFIUCV dummy device
 *
 * Sever all established IUCV communication paths
 */
static int afiucv_pm_freeze(struct device *dev)
{
	struct iucv_sock *iucv;
	struct sock *sk;

#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_freeze\n");
#endif
	read_lock(&iucv_sk_list.lock);
	sk_for_each(sk, &iucv_sk_list.head) {
		iucv = iucv_sk(sk);
		switch (sk->sk_state) {
		case IUCV_DISCONN:
		case IUCV_CLOSING:
		case IUCV_CONNECTED:
			iucv_sever_path(sk, 0);
			break;
		case IUCV_OPEN:
		case IUCV_BOUND:
		case IUCV_LISTEN:
		case IUCV_CLOSED:
		default:
			break;
		}
		skb_queue_purge(&iucv->send_skb_q);
		skb_queue_purge(&iucv->backlog_skb_q);
	}
	read_unlock(&iucv_sk_list.lock);
	return 0;
}

/**
 * afiucv_pm_restore_thaw() - Thaw and restore PM callback
 * @dev:	AFIUCV dummy device
 *
 * Socket clean-up after freeze
 */
static int afiucv_pm_restore_thaw(struct device *dev)
{
	struct sock *sk;

#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_restore_thaw\n");
#endif
	read_lock(&iucv_sk_list.lock);
	sk_for_each(sk, &iucv_sk_list.head) {
		switch (sk->sk_state) {
		case IUCV_CONNECTED:
			sk->sk_err = EPIPE;
			sk->sk_state = IUCV_DISCONN;
			sk->sk_state_change(sk);
			break;
		case IUCV_DISCONN:
		case IUCV_CLOSING:
		case IUCV_LISTEN:
		case IUCV_BOUND:
		case IUCV_OPEN:
		default:
			break;
		}
	}
	read_unlock(&iucv_sk_list.lock);
	return 0;
}

static const struct dev_pm_ops afiucv_pm_ops = {
	.prepare = afiucv_pm_prepare,
	.complete = afiucv_pm_complete,
	.freeze = afiucv_pm_freeze,
	.thaw = afiucv_pm_restore_thaw,
	.restore = afiucv_pm_restore_thaw,
};

static struct device_driver af_iucv_driver = {
	.owner = THIS_MODULE,
	.name = "afiucv",
	.bus  = NULL,
	.pm   = &afiucv_pm_ops,
};

/* dummy device used as trigger for PM functions */
static struct device *af_iucv_dev;

/**
 * iucv_msg_length() - Returns the length of an iucv message.
 * @msg:	Pointer to struct iucv_message, MUST NOT be NULL
 *
 * The function returns the length of the specified iucv message @msg,
 * whether the message data is stored in a buffer or in the parameter
 * list (PRMDATA).
 *
 * For IUCV_IPRMDATA, AF_IUCV uses the following convention to transport socket
 * data:
 *	PRMDATA[0..6]	socket data (max 7 bytes);
 *	PRMDATA[7]	socket data length value (len is 0xff - PRMDATA[7])
 *
 * The socket data length is computed by subtracting the socket data length
 * value from 0xFF.
 * If the socket data length is greater than 7, then PRMDATA can be used for
 * special notifications (see iucv_sock_shutdown); in that case, the function
 * returns 8.
 *
 * Use this function to allocate socket buffers to store iucv message data.
 */
static inline size_t iucv_msg_length(struct iucv_message *msg)
{
	size_t datalen;

	if (msg->flags & IUCV_IPRMDATA) {
		datalen = 0xff - msg->rmmsg[7];
		return (datalen < 8) ? datalen : 8;
	}
	return msg->length;
}
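
/*
 * Worked example of the IPRM length convention: for 3 bytes of socket data,
 * the sender stores PRMDATA[7] = 0xff - 3 = 0xfc, and iucv_msg_length()
 * computes 0xff - 0xfc = 3.  The special shutdown message iprm_shutdown[]
 * above ends in 0x01, i.e. an encoded "length" of 0xff - 0x01 = 0xfe > 7,
 * so iucv_msg_length() caps the result at 8.
 */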

/**
 * iucv_sock_in_state() - check for specific states
 * @sk:		sock structure
 * @state:	first iucv sk state
 * @state2:	second iucv sk state
 *
 * Returns true if the socket is in either the first or second state.
 */
static int iucv_sock_in_state(struct sock *sk, int state, int state2)
{
	return (sk->sk_state == state || sk->sk_state == state2);
}

/**
 * iucv_below_msglim() - function to check if messages can be sent
 * @sk:		sock structure
 *
 * Returns true if the send queue length is lower than the message limit.
 * Always returns true if the socket is not connected (no iucv path for
 * checking the message limit).
 */
static inline int iucv_below_msglim(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (sk->sk_state != IUCV_CONNECTED)
		return 1;
	if (iucv->transport == AF_IUCV_TRANS_IUCV)
		return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
	else
		return ((atomic_read(&iucv->msg_sent) < iucv->msglimit_peer) &&
			(atomic_read(&iucv->pendings) <= 0));
}
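
/*
 * Flow control thus differs per transport: the z/VM IUCV transport checks
 * the send queue length against the path's negotiated msglim, while the
 * HiperSockets transport checks the window advertised by the peer
 * (msglimit_peer) and also holds off while transmit notifications appear
 * to be outstanding (the pendings counter).
 */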

/**
 * iucv_sock_wake_msglim() - Wake up thread waiting on msg limit
 * @sk:		sock structure
 */
static void iucv_sock_wake_msglim(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
}

/**
 * afiucv_hs_send() - send a message through HiperSockets transport
 * @imsg:	iucv message header to embed, or NULL for pure control flags
 * @sock:	the sending struct sock
 * @skb:	socket buffer carrying the data
 * @flags:	AF_IUCV flag field (e.g. AF_IUCV_FLAG_SYN), or 0 for data
 */
static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
			  struct sk_buff *skb, u8 flags)
{
	struct iucv_sock *iucv = iucv_sk(sock);
	struct af_iucv_trans_hdr *phs_hdr;
	struct sk_buff *nskb;
	int err, confirm_recv = 0;

	memset(skb->head, 0, ETH_HLEN);
	phs_hdr = skb_push(skb, sizeof(struct af_iucv_trans_hdr));
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	memset(phs_hdr, 0, sizeof(struct af_iucv_trans_hdr));

	phs_hdr->magic = ETH_P_AF_IUCV;
	phs_hdr->version = 1;
	phs_hdr->flags = flags;
	if (flags == AF_IUCV_FLAG_SYN)
		phs_hdr->window = iucv->msglimit;
	else if ((flags == AF_IUCV_FLAG_WIN) || !flags) {
		confirm_recv = atomic_read(&iucv->msg_recv);
		phs_hdr->window = confirm_recv;
		if (confirm_recv)
			phs_hdr->flags = phs_hdr->flags | AF_IUCV_FLAG_WIN;
	}
	memcpy(phs_hdr->destUserID, iucv->dst_user_id, 8);
	memcpy(phs_hdr->destAppName, iucv->dst_name, 8);
	memcpy(phs_hdr->srcUserID, iucv->src_user_id, 8);
	memcpy(phs_hdr->srcAppName, iucv->src_name, 8);
	ASCEBC(phs_hdr->destUserID, sizeof(phs_hdr->destUserID));
	ASCEBC(phs_hdr->destAppName, sizeof(phs_hdr->destAppName));
	ASCEBC(phs_hdr->srcUserID, sizeof(phs_hdr->srcUserID));
	ASCEBC(phs_hdr->srcAppName, sizeof(phs_hdr->srcAppName));
	if (imsg)
		memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));

	skb->dev = iucv->hs_dev;
	if (!skb->dev) {
		err = -ENODEV;
		goto err_free;
	}
	if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev)) {
		err = -ENETDOWN;
		goto err_free;
	}
	if (skb->len > skb->dev->mtu) {
		if (sock->sk_type == SOCK_SEQPACKET) {
			err = -EMSGSIZE;
			goto err_free;
		}
		skb_trim(skb, skb->dev->mtu);
	}
	skb->protocol = cpu_to_be16(ETH_P_AF_IUCV);
	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb) {
		err = -ENOMEM;
		goto err_free;
	}

	skb_queue_tail(&iucv->send_skb_q, nskb);
	err = dev_queue_xmit(skb);
	if (net_xmit_eval(err)) {
		skb_unlink(nskb, &iucv->send_skb_q);
		kfree_skb(nskb);
	} else {
		atomic_sub(confirm_recv, &iucv->msg_recv);
		WARN_ON(atomic_read(&iucv->msg_recv) < 0);
	}
	return net_xmit_eval(err);

err_free:
	kfree_skb(skb);
	return err;
}
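
/*
 * The frame built above is laid out, from the mac header on, as
 *
 *	[ ethernet header (zeroed) | af_iucv_trans_hdr | payload ]
 *
 * with the two skb_push() calls prepending the transport header and the
 * ethernet header in front of the payload.  A clone of the frame stays on
 * send_skb_q; its completion is handled via the transmit notification
 * callback (see afiucv_hs_callback_txnotify).
 */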

static struct sock *__iucv_get_sock_by_name(char *nm)
{
	struct sock *sk;

	sk_for_each(sk, &iucv_sk_list.head)
		if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
			return sk;

	return NULL;
}

static void iucv_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_error_queue);

	sk_mem_reclaim(sk);

	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_err("Attempt to release alive iucv socket %p\n", sk);
		return;
	}

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
	WARN_ON(sk->sk_wmem_queued);
	WARN_ON(sk->sk_forward_alloc);
}

/* Cleanup Listen */
static void iucv_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	/* Close non-accepted connections */
	while ((sk = iucv_accept_dequeue(parent, NULL))) {
		iucv_sock_close(sk);
		iucv_sock_kill(sk);
	}

	parent->sk_state = IUCV_CLOSED;
}

/* Kill socket (only if zapped and orphaned) */
static void iucv_sock_kill(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	iucv_sock_unlink(&iucv_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}

/* Terminate an IUCV path */
static void iucv_sever_path(struct sock *sk, int with_user_data)
{
	unsigned char user_data[16];
	struct iucv_sock *iucv = iucv_sk(sk);
	struct iucv_path *path = iucv->path;

	if (iucv->path) {
		iucv->path = NULL;
		if (with_user_data) {
			low_nmcpy(user_data, iucv->src_name);
			high_nmcpy(user_data, iucv->dst_name);
			ASCEBC(user_data, sizeof(user_data));
			pr_iucv->path_sever(path, user_data);
		} else
			pr_iucv->path_sever(path, NULL);
		iucv_path_free(path);
	}
}

/* Send controlling flags through an IUCV socket for HIPER transport */
static int iucv_send_ctrl(struct sock *sk, u8 flags)
{
	int err = 0;
	int blen;
	struct sk_buff *skb;
	u8 shutdown = 0;

	blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		/* controlling flags should be sent anyway */
		shutdown = sk->sk_shutdown;
		sk->sk_shutdown &= RCV_SHUTDOWN;
	}
	skb = sock_alloc_send_skb(sk, blen, 1, &err);
	if (skb) {
		skb_reserve(skb, blen);
		err = afiucv_hs_send(NULL, sk, skb, flags);
	}
	if (shutdown)
		sk->sk_shutdown = shutdown;
	return err;
}

/* Close an IUCV socket */
static void iucv_sock_close(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned long timeo;
	int err = 0;

	lock_sock(sk);

	switch (sk->sk_state) {
	case IUCV_LISTEN:
		iucv_sock_cleanup_listen(sk);
		break;

	case IUCV_CONNECTED:
		if (iucv->transport == AF_IUCV_TRANS_HIPER) {
			err = iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
			sk->sk_state = IUCV_DISCONN;
			sk->sk_state_change(sk);
		}
	case IUCV_DISCONN:   /* fall through */
		sk->sk_state = IUCV_CLOSING;
		sk->sk_state_change(sk);

		if (!err && !skb_queue_empty(&iucv->send_skb_q)) {
			if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
				timeo = sk->sk_lingertime;
			else
				timeo = IUCV_DISCONN_TIMEOUT;
			iucv_sock_wait(sk,
				       iucv_sock_in_state(sk, IUCV_CLOSED, 0),
				       timeo);
		}

	case IUCV_CLOSING:   /* fall through */
		sk->sk_state = IUCV_CLOSED;
		sk->sk_state_change(sk);

		sk->sk_err = ECONNRESET;
		sk->sk_state_change(sk);

		skb_queue_purge(&iucv->send_skb_q);
		skb_queue_purge(&iucv->backlog_skb_q);

	default:   /* fall through */
		iucv_sever_path(sk, 1);
	}

	if (iucv->hs_dev) {
		dev_put(iucv->hs_dev);
		iucv->hs_dev = NULL;
		sk->sk_bound_dev_if = 0;
	}

	/* mark socket for deletion by iucv_sock_kill() */
	sock_set_flag(sk, SOCK_ZAPPED);

	release_sock(sk);
}

static void iucv_sock_init(struct sock *sk, struct sock *parent)
{
	if (parent) {
		sk->sk_type = parent->sk_type;
		security_sk_clone(parent, sk);
	}
}

static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio, int kern)
{
	struct sock *sk;
	struct iucv_sock *iucv;

	sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto, kern);
	if (!sk)
		return NULL;
	iucv = iucv_sk(sk);

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&iucv->accept_q);
	spin_lock_init(&iucv->accept_q_lock);
	skb_queue_head_init(&iucv->send_skb_q);
	INIT_LIST_HEAD(&iucv->message_q.list);
	spin_lock_init(&iucv->message_q.lock);
	skb_queue_head_init(&iucv->backlog_skb_q);
	iucv->send_tag = 0;
	atomic_set(&iucv->pendings, 0);
	iucv->flags = 0;
	iucv->msglimit = 0;
	atomic_set(&iucv->msg_sent, 0);
	atomic_set(&iucv->msg_recv, 0);
	iucv->path = NULL;
	iucv->sk_txnotify = afiucv_hs_callback_txnotify;
	memset(&iucv->src_user_id, 0, 32);
	if (pr_iucv)
		iucv->transport = AF_IUCV_TRANS_IUCV;
	else
		iucv->transport = AF_IUCV_TRANS_HIPER;

	sk->sk_destruct = iucv_sock_destruct;
	sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
	sk->sk_allocation = GFP_DMA;

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state	= IUCV_OPEN;

	iucv_sock_link(&iucv_sk_list, sk);
	return sk;
}

/* Create an IUCV socket */
static int iucv_sock_create(struct net *net, struct socket *sock, int protocol,
			    int kern)
{
	struct sock *sk;

	if (protocol && protocol != PF_IUCV)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	switch (sock->type) {
	case SOCK_STREAM:
		sock->ops = &iucv_sock_ops;
		break;
	case SOCK_SEQPACKET:
		/* currently, proto ops can handle both sk types */
		sock->ops = &iucv_sock_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL, kern);
	if (!sk)
		return -ENOMEM;

	iucv_sock_init(sk, NULL);

	return 0;
}

void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_add_node(sk, &l->head);
	write_unlock_bh(&l->lock);
}

void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_del_node_init(sk);
	write_unlock_bh(&l->lock);
}

void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(parent);

	sock_hold(sk);
	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	iucv_sk(sk)->parent = parent;
	sk_acceptq_added(parent);
}

void iucv_accept_unlink(struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);

	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_del_init(&iucv_sk(sk)->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	sk_acceptq_removed(iucv_sk(sk)->parent);
	iucv_sk(sk)->parent = NULL;
	sock_put(sk);
}

struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;
		lock_sock(sk);

		if (sk->sk_state == IUCV_CLOSED) {
			iucv_accept_unlink(sk);
			release_sock(sk);
			continue;
		}

		if (sk->sk_state == IUCV_CONNECTED ||
		    sk->sk_state == IUCV_DISCONN ||
		    !newsock) {
			iucv_accept_unlink(sk);
			if (newsock)
				sock_graft(sk, newsock);

			release_sock(sk);
			return sk;
		}

		release_sock(sk);
	}
	return NULL;
}

static void __iucv_auto_name(struct iucv_sock *iucv)
{
	char name[12];

	sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
	while (__iucv_get_sock_by_name(name)) {
		sprintf(name, "%08x",
			atomic_inc_return(&iucv_sk_list.autobind_name));
	}
	memcpy(iucv->src_name, name, 8);
}
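
/*
 * __iucv_auto_name() generates names from a global counter, e.g. "00000001",
 * "00000002", ...; the "%08x" format always yields exactly the eight
 * characters copied into src_name, and the loop skips values already taken
 * by another bound socket.
 */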

/* Bind an unbound socket */
static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
			  int addr_len)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv;
	int err = 0;
	struct net_device *dev;
	char uid[9];

	/* Verify the input sockaddr */
	if (addr_len < sizeof(struct sockaddr_iucv) ||
	    addr->sa_family != AF_IUCV)
		return -EINVAL;

	lock_sock(sk);
	if (sk->sk_state != IUCV_OPEN) {
		err = -EBADFD;
		goto done;
	}

	write_lock_bh(&iucv_sk_list.lock);

	iucv = iucv_sk(sk);
	if (__iucv_get_sock_by_name(sa->siucv_name)) {
		err = -EADDRINUSE;
		goto done_unlock;
	}
	if (iucv->path)
		goto done_unlock;

	/* Bind the socket */
	if (pr_iucv)
		if (!memcmp(sa->siucv_user_id, iucv_userid, 8))
			goto vm_bind; /* VM IUCV transport */

	/* try hiper transport */
	memcpy(uid, sa->siucv_user_id, sizeof(uid));
	ASCEBC(uid, 8);
	rcu_read_lock();
	for_each_netdev_rcu(&init_net, dev) {
		if (!memcmp(dev->perm_addr, uid, 8)) {
			memcpy(iucv->src_user_id, sa->siucv_user_id, 8);
			/* Check for uninitialized siucv_name */
			if (strncmp(sa->siucv_name, "        ", 8) == 0)
				__iucv_auto_name(iucv);
			else
				memcpy(iucv->src_name, sa->siucv_name, 8);
			sk->sk_bound_dev_if = dev->ifindex;
			iucv->hs_dev = dev;
			dev_hold(dev);
			sk->sk_state = IUCV_BOUND;
			iucv->transport = AF_IUCV_TRANS_HIPER;
			if (!iucv->msglimit)
				iucv->msglimit = IUCV_HIPER_MSGLIM_DEFAULT;
			rcu_read_unlock();
			goto done_unlock;
		}
	}
	rcu_read_unlock();
vm_bind:
	if (pr_iucv) {
		/* use local userid for backward compat */
		memcpy(iucv->src_name, sa->siucv_name, 8);
		memcpy(iucv->src_user_id, iucv_userid, 8);
		sk->sk_state = IUCV_BOUND;
		iucv->transport = AF_IUCV_TRANS_IUCV;
		if (!iucv->msglimit)
			iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
		goto done_unlock;
	}
	/* found no dev to bind */
	err = -ENODEV;
done_unlock:
	/* Release the socket list lock */
	write_unlock_bh(&iucv_sk_list.lock);
done:
	release_sock(sk);
	return err;
}
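
/*
 * Minimal user-space sketch of binding an AF_IUCV socket (illustrative
 * only, not part of this file; the blank-padded IDs are made-up examples):
 *
 *	struct sockaddr_iucv sa = { .siucv_family = AF_IUCV };
 *	memcpy(sa.siucv_user_id, "LNXGUEST", 8);   // z/VM user ID, blank-padded
 *	memcpy(sa.siucv_name,    "MYAPPL  ", 8);   // application name
 *	bind(fd, (struct sockaddr *)&sa, sizeof(sa));
 *
 * Passing eight blanks in siucv_name makes the HiperSockets branch above
 * call __iucv_auto_name() instead.
 */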

/* Automatically bind an unbound socket */
static int iucv_sock_autobind(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	int err = 0;

	if (unlikely(!pr_iucv))
		return -EPROTO;

	memcpy(iucv->src_user_id, iucv_userid, 8);

	write_lock_bh(&iucv_sk_list.lock);
	__iucv_auto_name(iucv);
	write_unlock_bh(&iucv_sk_list.lock);

	if (!iucv->msglimit)
		iucv->msglimit = IUCV_QUEUELEN_DEFAULT;

	return err;
}

static int afiucv_path_connect(struct socket *sock, struct sockaddr *addr)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned char user_data[16];
	int err;

	high_nmcpy(user_data, sa->siucv_name);
	low_nmcpy(user_data, iucv->src_name);
	ASCEBC(user_data, sizeof(user_data));

	/* Create path. */
	iucv->path = iucv_path_alloc(iucv->msglimit,
				     IUCV_IPRMDATA, GFP_KERNEL);
	if (!iucv->path) {
		err = -ENOMEM;
		goto done;
	}
	err = pr_iucv->path_connect(iucv->path, &af_iucv_handler,
				    sa->siucv_user_id, NULL, user_data,
				    sk);
	if (err) {
		iucv_path_free(iucv->path);
		iucv->path = NULL;
		switch (err) {
		case 0x0b:	/* Target communicator is not logged on */
			err = -ENETUNREACH;
			break;
		case 0x0d:	/* Max connections for this guest exceeded */
		case 0x0e:	/* Max connections for target guest exceeded */
			err = -EAGAIN;
			break;
		case 0x0f:	/* Missing IUCV authorization */
			err = -EACCES;
			break;
		default:
			err = -ECONNREFUSED;
			break;
		}
	}
done:
	return err;
}

/* Connect an unconnected socket */
static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
			     int alen, int flags)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int err;

	if (alen < sizeof(struct sockaddr_iucv) || addr->sa_family != AF_IUCV)
		return -EINVAL;

	if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
		return -EBADFD;

	if (sk->sk_state == IUCV_OPEN &&
	    iucv->transport == AF_IUCV_TRANS_HIPER)
		return -EBADFD; /* explicit bind required */

	if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
		return -EINVAL;

	if (sk->sk_state == IUCV_OPEN) {
		err = iucv_sock_autobind(sk);
		if (unlikely(err))
			return err;
	}

	lock_sock(sk);

	/* Set the destination information */
	memcpy(iucv->dst_user_id, sa->siucv_user_id, 8);
	memcpy(iucv->dst_name, sa->siucv_name, 8);

	if (iucv->transport == AF_IUCV_TRANS_HIPER)
		err = iucv_send_ctrl(sock->sk, AF_IUCV_FLAG_SYN);
	else
		err = afiucv_path_connect(sock, addr);
	if (err)
		goto done;

	if (sk->sk_state != IUCV_CONNECTED)
		err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
							    IUCV_DISCONN),
				     sock_sndtimeo(sk, flags & O_NONBLOCK));

	if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_CLOSED)
		err = -ECONNREFUSED;

	if (err && iucv->transport == AF_IUCV_TRANS_IUCV)
		iucv_sever_path(sk, 0);

done:
	release_sock(sk);
	return err;
}

/* Move a socket into listening state. */
static int iucv_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err;

	lock_sock(sk);

	err = -EINVAL;
	if (sk->sk_state != IUCV_BOUND)
		goto done;

	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto done;

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = IUCV_LISTEN;
	err = 0;

done:
	release_sock(sk);
	return err;
}

/* Accept a pending connection */
static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
			    int flags, bool kern)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != IUCV_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	/* Wait for an incoming connection */
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		if (sk->sk_state != IUCV_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

done:
	release_sock(sk);
	return err;
}

static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
			     int peer)
{
	struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);

	addr->sa_family = AF_IUCV;

	if (peer) {
		memcpy(siucv->siucv_user_id, iucv->dst_user_id, 8);
		memcpy(siucv->siucv_name, iucv->dst_name, 8);
	} else {
		memcpy(siucv->siucv_user_id, iucv->src_user_id, 8);
		memcpy(siucv->siucv_name, iucv->src_name, 8);
	}
	memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
	memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
	memset(&siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));

	return sizeof(struct sockaddr_iucv);
}

/**
 * iucv_send_iprm() - Send socket data in parameter list of an iucv message.
 * @path:	IUCV path
 * @msg:	Pointer to a struct iucv_message
 * @skb:	The socket data to send, skb->len MUST BE <= 7
 *
 * Send the socket data in the parameter list in the iucv message
 * (IUCV_IPRMDATA). The socket data is stored at index 0 to 6 in the parameter
 * list and the socket data len at index 7 (last byte).
 * See also iucv_msg_length().
 *
 * Returns the error code from the iucv_message_send() call.
 */
static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg,
			  struct sk_buff *skb)
{
	u8 prmdata[8];

	memcpy(prmdata, (void *) skb->data, skb->len);
	prmdata[7] = 0xff - (u8) skb->len;
	return pr_iucv->message_send(path, msg, IUCV_IPRMDATA, 0,
				     (void *) prmdata, 8);
}
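
/*
 * For example, sending the two bytes "hi" this way yields prmdata[0] = 'h',
 * prmdata[1] = 'i' and prmdata[7] = 0xff - 2 = 0xfd (bytes 2..6 are unused
 * padding); the receiver recovers the length 2 via iucv_msg_length().
 */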

static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			     size_t len)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	size_t headroom = 0;
	size_t linear;
	struct sk_buff *skb;
	struct iucv_message txmsg = {0};
	struct cmsghdr *cmsg;
	int cmsg_done;
	long timeo;
	char user_id[9];
	char appl_id[9];
	int err;
	int noblock = msg->msg_flags & MSG_DONTWAIT;

	err = sock_error(sk);
	if (err)
		return err;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/* SOCK_SEQPACKET: we do not support segmented records */
	if (sk->sk_type == SOCK_SEQPACKET && !(msg->msg_flags & MSG_EOR))
		return -EOPNOTSUPP;

	lock_sock(sk);

	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		err = -EPIPE;
		goto out;
	}

	/* Return if the socket is not in connected state */
	if (sk->sk_state != IUCV_CONNECTED) {
		err = -ENOTCONN;
		goto out;
	}

	/* initialize defaults */
	cmsg_done   = 0;	/* check for duplicate headers */
	txmsg.class = 0;

	/* iterate over control messages */
	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg)) {
			err = -EINVAL;
			goto out;
		}

		if (cmsg->cmsg_level != SOL_IUCV)
			continue;

		if (cmsg->cmsg_type & cmsg_done) {
			err = -EINVAL;
			goto out;
		}
		cmsg_done |= cmsg->cmsg_type;

		switch (cmsg->cmsg_type) {
		case SCM_IUCV_TRGCLS:
			if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) {
				err = -EINVAL;
				goto out;
			}

			/* set iucv message target class */
			memcpy(&txmsg.class,
			       (void *) CMSG_DATA(cmsg), TRGCLS_SIZE);

			break;

		default:
			err = -EINVAL;
			goto out;
		}
	}

	/* allocate one skb for each iucv message:
	 * this is fine for SOCK_SEQPACKET (unless we want to support
	 * segmented records using the MSG_EOR flag), but
	 * for SOCK_STREAM we might want to improve it in future */
	if (iucv->transport == AF_IUCV_TRANS_HIPER) {
		headroom = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
		linear = len;
	} else {
		if (len < PAGE_SIZE) {
			linear = len;
		} else {
			/* In nonlinear "classic" iucv skb,
			 * reserve space for iucv_array
			 */
			headroom = sizeof(struct iucv_array) *
				   (MAX_SKB_FRAGS + 1);
			linear = PAGE_SIZE - headroom;
		}
	}
	skb = sock_alloc_send_pskb(sk, headroom + linear, len - linear,
				   noblock, &err, 0);
	if (!skb)
		goto out;
	if (headroom)
		skb_reserve(skb, headroom);
	skb_put(skb, linear);
	skb->len = len;
	skb->data_len = len - linear;
	err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
	if (err)
		goto fail;

	/* wait if outstanding messages for the iucv path have reached the limit */
	timeo = sock_sndtimeo(sk, noblock);
	err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo);
	if (err)
		goto fail;

	/* return -ECONNRESET if the socket is no longer connected */
	if (sk->sk_state != IUCV_CONNECTED) {
		err = -ECONNRESET;
		goto fail;
	}

	/* increment and save iucv message tag for msg_completion cbk */
	txmsg.tag = iucv->send_tag++;
	IUCV_SKB_CB(skb)->tag = txmsg.tag;

	if (iucv->transport == AF_IUCV_TRANS_HIPER) {
		atomic_inc(&iucv->msg_sent);
		err = afiucv_hs_send(&txmsg, sk, skb, 0);
		if (err) {
			atomic_dec(&iucv->msg_sent);
			goto out;
		}
	} else { /* Classic VM IUCV transport */
		skb_queue_tail(&iucv->send_skb_q, skb);

		if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags) &&
		    skb->len <= 7) {
			err = iucv_send_iprm(iucv->path, &txmsg, skb);

			/* on success: there is no message_complete callback */
			/* for an IPRMDATA msg; remove skb from send queue   */
			if (err == 0) {
				skb_unlink(skb, &iucv->send_skb_q);
				kfree_skb(skb);
			}

			/* this error should never happen since the	*/
			/* IUCV_IPRMDATA path flag is set... sever path */
			if (err == 0x15) {
				pr_iucv->path_sever(iucv->path, NULL);
				skb_unlink(skb, &iucv->send_skb_q);
				err = -EPIPE;
				goto fail;
			}
		} else if (skb_is_nonlinear(skb)) {
			struct iucv_array *iba = (struct iucv_array *)skb->head;
			int i;

			/* skip iucv_array lying in the headroom */
			iba[0].address = (u32)(addr_t)skb->data;
			iba[0].length = (u32)skb_headlen(skb);
			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
				skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

				iba[i + 1].address =
					(u32)(addr_t)skb_frag_address(frag);
				iba[i + 1].length = (u32)skb_frag_size(frag);
			}
			err = pr_iucv->message_send(iucv->path, &txmsg,
						    IUCV_IPBUFLST, 0,
						    (void *)iba, skb->len);
		} else { /* non-IPRM Linear skb */
			err = pr_iucv->message_send(iucv->path, &txmsg,
					0, 0, (void *)skb->data, skb->len);
		}
		if (err) {
			if (err == 3) {
				user_id[8] = 0;
				memcpy(user_id, iucv->dst_user_id, 8);
				appl_id[8] = 0;
				memcpy(appl_id, iucv->dst_name, 8);
				pr_err("Application %s on z/VM guest %s exceeds message limit\n",
				       appl_id, user_id);
				err = -EAGAIN;
			} else {
				err = -EPIPE;
			}
			skb_unlink(skb, &iucv->send_skb_q);
			goto fail;
		}
	}

	release_sock(sk);
	return len;

fail:
	kfree_skb(skb);
out:
	release_sock(sk);
	return err;
}

static struct sk_buff *alloc_iucv_recv_skb(unsigned long len)
{
	size_t headroom, linear;
	struct sk_buff *skb;
	int err;

	if (len < PAGE_SIZE) {
		headroom = 0;
		linear = len;
	} else {
		headroom = sizeof(struct iucv_array) * (MAX_SKB_FRAGS + 1);
		linear = PAGE_SIZE - headroom;
	}
	skb = alloc_skb_with_frags(headroom + linear, len - linear,
				   0, &err, GFP_ATOMIC | GFP_DMA);
	WARN_ONCE(!skb,
		  "alloc of recv iucv skb len=%lu failed with errcode=%d\n",
		  len, err);
	if (skb) {
		if (headroom)
			skb_reserve(skb, headroom);
		skb_put(skb, linear);
		skb->len = len;
		skb->data_len = len - linear;
	}
	return skb;
}
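
/*
 * Receive skbs mirror the send-side layout: messages of at least PAGE_SIZE
 * get headroom for an iucv_array descriptor list (one entry for the linear
 * part plus one per page fragment), which iucv_process_message() fills in
 * before issuing the IUCV_IPBUFLST receive.
 */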

/* iucv_process_message() - Receive a single outstanding IUCV message
 *
 * Locking: must be called with message_q.lock held
 */
static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
				 struct iucv_path *path,
				 struct iucv_message *msg)
{
	int rc;
	unsigned int len;

	len = iucv_msg_length(msg);

	/* store msg target class in the second 4 bytes of skb ctrl buffer */
	/* Note: the first 4 bytes are reserved for msg tag */
	IUCV_SKB_CB(skb)->class = msg->class;

	/* check for special IPRM messages (e.g. iucv_sock_shutdown) */
	if ((msg->flags & IUCV_IPRMDATA) && len > 7) {
		if (memcmp(msg->rmmsg, iprm_shutdown, 8) == 0) {
			skb->data = NULL;
			skb->len = 0;
		}
	} else {
		if (skb_is_nonlinear(skb)) {
			struct iucv_array *iba = (struct iucv_array *)skb->head;
			int i;

			iba[0].address = (u32)(addr_t)skb->data;
			iba[0].length = (u32)skb_headlen(skb);
			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
				skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

				iba[i + 1].address =
					(u32)(addr_t)skb_frag_address(frag);
				iba[i + 1].length = (u32)skb_frag_size(frag);
			}
			rc = pr_iucv->message_receive(path, msg,
						      IUCV_IPBUFLST,
						      (void *)iba, len, NULL);
		} else {
			rc = pr_iucv->message_receive(path, msg,
						      msg->flags & IUCV_IPRMDATA,
						      skb->data, len, NULL);
		}
		if (rc) {
			kfree_skb(skb);
			return;
		}
		WARN_ON_ONCE(skb->len != len);
	}

	IUCV_SKB_CB(skb)->offset = 0;
	if (sk_filter(sk, skb)) {
		atomic_inc(&sk->sk_drops);	/* skb rejected by filter */
		kfree_skb(skb);
		return;
	}
	if (__sock_queue_rcv_skb(sk, skb))	/* handle rcv queue full */
		skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
}

/* iucv_process_message_q() - Process outstanding IUCV messages
 *
 * Locking: must be called with message_q.lock held
 */
static void iucv_process_message_q(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct sock_msg_q *p, *n;

	list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
		skb = alloc_iucv_recv_skb(iucv_msg_length(&p->msg));
		if (!skb)
			break;
		iucv_process_message(sk, skb, p->path, &p->msg);
		list_del(&p->list);
		kfree(p);
		if (!skb_queue_empty(&iucv->backlog_skb_q))
			break;
	}
}
|  | 1366 |  | 
|  | 1367 | static int iucv_sock_recvmsg(struct socket *sock, struct msghdr *msg, | 
|  | 1368 | size_t len, int flags) | 
|  | 1369 | { | 
|  | 1370 | int noblock = flags & MSG_DONTWAIT; | 
|  | 1371 | struct sock *sk = sock->sk; | 
|  | 1372 | struct iucv_sock *iucv = iucv_sk(sk); | 
|  | 1373 | unsigned int copied, rlen; | 
|  | 1374 | struct sk_buff *skb, *rskb, *cskb; | 
|  | 1375 | int err = 0; | 
|  | 1376 | u32 offset; | 
|  | 1377 |  | 
|  | 1378 | if ((sk->sk_state == IUCV_DISCONN) && | 
|  | 1379 | skb_queue_empty(&iucv->backlog_skb_q) && | 
|  | 1380 | skb_queue_empty(&sk->sk_receive_queue) && | 
|  | 1381 | list_empty(&iucv->message_q.list)) | 
|  | 1382 | return 0; | 
|  | 1383 |  | 
|  | 1384 | if (flags & (MSG_OOB)) | 
|  | 1385 | return -EOPNOTSUPP; | 
|  | 1386 |  | 
|  | 1387 | /* receive/dequeue next skb: | 
|  | 1388 | * the function understands MSG_PEEK and, thus, does not dequeue skb */ | 
|  | 1389 | skb = skb_recv_datagram(sk, flags, noblock, &err); | 
|  | 1390 | if (!skb) { | 
|  | 1391 | if (sk->sk_shutdown & RCV_SHUTDOWN) | 
|  | 1392 | return 0; | 
|  | 1393 | return err; | 
|  | 1394 | } | 
|  | 1395 |  | 
|  | 1396 | offset = IUCV_SKB_CB(skb)->offset; | 
|  | 1397 | rlen   = skb->len - offset;		/* real length of skb */ | 
|  | 1398 | copied = min_t(unsigned int, rlen, len); | 
|  | 1399 | if (!rlen) | 
|  | 1400 | sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN; | 
|  | 1401 |  | 
|  | 1402 | cskb = skb; | 
|  | 1403 | if (skb_copy_datagram_msg(cskb, offset, msg, copied)) { | 
|  | 1404 | if (!(flags & MSG_PEEK)) | 
|  | 1405 | skb_queue_head(&sk->sk_receive_queue, skb); | 
|  | 1406 | return -EFAULT; | 
|  | 1407 | } | 
|  | 1408 |  | 
|  | 1409 | /* SOCK_SEQPACKET: set MSG_TRUNC if recv buf size is too small */ | 
|  | 1410 | if (sk->sk_type == SOCK_SEQPACKET) { | 
|  | 1411 | if (copied < rlen) | 
|  | 1412 | msg->msg_flags |= MSG_TRUNC; | 
|  | 1413 | /* each iucv message contains a complete record */ | 
|  | 1414 | msg->msg_flags |= MSG_EOR; | 
|  | 1415 | } | 
|  | 1416 |  | 
|  | 1417 | /* create control message to store iucv msg target class: | 
|  | 1418 | * get the trgcls from the control buffer of the skb due to | 
|  | 1419 | * fragmentation of original iucv message. */ | 
|  | 1420 | err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS, | 
|  | 1421 | sizeof(IUCV_SKB_CB(skb)->class), | 
|  | 1422 | (void *)&IUCV_SKB_CB(skb)->class); | 
|  | 1423 | if (err) { | 
|  | 1424 | if (!(flags & MSG_PEEK)) | 
|  | 1425 | skb_queue_head(&sk->sk_receive_queue, skb); | 
|  | 1426 | return err; | 
|  | 1427 | } | 
|  | 1428 |  | 
|  | 1429 | /* Mark read part of skb as used */ | 
|  | 1430 | if (!(flags & MSG_PEEK)) { | 
|  | 1431 |  | 
|  | 1432 | /* SOCK_STREAM: re-queue skb if it contains unreceived data */ | 
|  | 1433 | if (sk->sk_type == SOCK_STREAM) { | 
|  | 1434 | if (copied < rlen) { | 
|  | 1435 | IUCV_SKB_CB(skb)->offset = offset + copied; | 
|  | 1436 | skb_queue_head(&sk->sk_receive_queue, skb); | 
|  | 1437 | goto done; | 
|  | 1438 | } | 
|  | 1439 | } | 
|  | 1440 |  | 
|  | 1441 | kfree_skb(skb); | 
|  | 1442 | if (iucv->transport == AF_IUCV_TRANS_HIPER) { | 
|  | 1443 | atomic_inc(&iucv->msg_recv); | 
|  | 1444 | if (atomic_read(&iucv->msg_recv) > iucv->msglimit) { | 
|  | 1445 | WARN_ON(1); | 
|  | 1446 | iucv_sock_close(sk); | 
|  | 1447 | return -EFAULT; | 
|  | 1448 | } | 
|  | 1449 | } | 
|  | 1450 |  | 
|  | 1451 | /* Queue backlog skbs */ | 
|  | 1452 | spin_lock_bh(&iucv->message_q.lock); | 
|  | 1453 | rskb = skb_dequeue(&iucv->backlog_skb_q); | 
|  | 1454 | while (rskb) { | 
|  | 1455 | IUCV_SKB_CB(rskb)->offset = 0; | 
|  | 1456 | if (__sock_queue_rcv_skb(sk, rskb)) { | 
|  | 1457 | /* handle rcv queue full */ | 
|  | 1458 | skb_queue_head(&iucv->backlog_skb_q, | 
|  | 1459 | rskb); | 
|  | 1460 | break; | 
|  | 1461 | } | 
|  | 1462 | rskb = skb_dequeue(&iucv->backlog_skb_q); | 
|  | 1463 | } | 
|  | 1464 | if (skb_queue_empty(&iucv->backlog_skb_q)) { | 
|  | 1465 | if (!list_empty(&iucv->message_q.list)) | 
|  | 1466 | iucv_process_message_q(sk); | 
|  | 1467 | if (atomic_read(&iucv->msg_recv) >= | 
|  | 1468 | iucv->msglimit / 2) { | 
|  | 1469 | err = iucv_send_ctrl(sk, AF_IUCV_FLAG_WIN); | 
|  | 1470 | if (err) { | 
|  | 1471 | sk->sk_state = IUCV_DISCONN; | 
|  | 1472 | sk->sk_state_change(sk); | 
|  | 1473 | } | 
|  | 1474 | } | 
|  | 1475 | } | 
|  | 1476 | spin_unlock_bh(&iucv->message_q.lock); | 
|  | 1477 | } | 
|  | 1478 |  | 
|  | 1479 | done: | 
|  | 1480 | /* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */ | 
|  | 1481 | if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC)) | 
|  | 1482 | copied = rlen; | 
|  | 1483 |  | 
|  | 1484 | return copied; | 
|  | 1485 | } | 
|  | 1486 |  | 
|  | 1487 | static inline __poll_t iucv_accept_poll(struct sock *parent) | 
|  | 1488 | { | 
|  | 1489 | struct iucv_sock *isk, *n; | 
|  | 1490 | struct sock *sk; | 
|  | 1491 |  | 
|  | 1492 | list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) { | 
|  | 1493 | sk = (struct sock *) isk; | 
|  | 1494 |  | 
|  | 1495 | if (sk->sk_state == IUCV_CONNECTED) | 
|  | 1496 | return EPOLLIN | EPOLLRDNORM; | 
|  | 1497 | } | 
|  | 1498 |  | 
|  | 1499 | return 0; | 
|  | 1500 | } | 
|  | 1501 |  | 
|  | 1502 | __poll_t iucv_sock_poll(struct file *file, struct socket *sock, | 
|  | 1503 | poll_table *wait) | 
|  | 1504 | { | 
|  | 1505 | struct sock *sk = sock->sk; | 
|  | 1506 | __poll_t mask = 0; | 
|  | 1507 |  | 
|  | 1508 | sock_poll_wait(file, sock, wait); | 
|  | 1509 |  | 
|  | 1510 | if (sk->sk_state == IUCV_LISTEN) | 
|  | 1511 | return iucv_accept_poll(sk); | 
|  | 1512 |  | 
|  | 1513 | if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) | 
|  | 1514 | mask |= EPOLLERR | | 
|  | 1515 | (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0); | 
|  | 1516 |  | 
|  | 1517 | if (sk->sk_shutdown & RCV_SHUTDOWN) | 
|  | 1518 | mask |= EPOLLRDHUP; | 
|  | 1519 |  | 
|  | 1520 | if (sk->sk_shutdown == SHUTDOWN_MASK) | 
|  | 1521 | mask |= EPOLLHUP; | 
|  | 1522 |  | 
|  | 1523 | if (!skb_queue_empty(&sk->sk_receive_queue) || | 
|  | 1524 | (sk->sk_shutdown & RCV_SHUTDOWN)) | 
|  | 1525 | mask |= EPOLLIN | EPOLLRDNORM; | 
|  | 1526 |  | 
|  | 1527 | if (sk->sk_state == IUCV_CLOSED) | 
|  | 1528 | mask |= EPOLLHUP; | 
|  | 1529 |  | 
|  | 1530 | if (sk->sk_state == IUCV_DISCONN) | 
|  | 1531 | mask |= EPOLLIN; | 
|  | 1532 |  | 
|  | 1533 | if (sock_writeable(sk) && iucv_below_msglim(sk)) | 
|  | 1534 | mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND; | 
|  | 1535 | else | 
|  | 1536 | sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); | 
|  | 1537 |  | 
|  | 1538 | return mask; | 
|  | 1539 | } | 
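|  |  |  |
|  |  | /* Usage sketch, not taken from this file: how the mask computed above |
|  |  |  * surfaces to a userspace poll(2) caller. iucv_fd and wait_readable() |
|  |  |  * are hypothetical names; POLLRDHUP requires _GNU_SOURCE. |
|  |  |  * |
|  |  |  *	#define _GNU_SOURCE |
|  |  |  *	#include <poll.h> |
|  |  |  * |
|  |  |  *	static int wait_readable(int iucv_fd) |
|  |  |  *	{ |
|  |  |  *		struct pollfd pfd = { .fd = iucv_fd, |
|  |  |  *				      .events = POLLIN | POLLRDHUP }; |
|  |  |  * |
|  |  |  *		if (poll(&pfd, 1, 5000) <= 0) |
|  |  |  *			return -1;	// timeout or error |
|  |  |  *		if (pfd.revents & POLLRDHUP) |
|  |  |  *			return 0;	// receive direction shut down |
|  |  |  *		return pfd.revents & POLLIN;	// data pending |
|  |  |  *	} |
|  |  |  */ |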
|  | 1540 |  | 
|  | 1541 | static int iucv_sock_shutdown(struct socket *sock, int how) | 
|  | 1542 | { | 
|  | 1543 | struct sock *sk = sock->sk; | 
|  | 1544 | struct iucv_sock *iucv = iucv_sk(sk); | 
|  | 1545 | struct iucv_message txmsg; | 
|  | 1546 | int err = 0; | 
|  | 1547 |  | 
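|  |  | /* map SHUT_RD(0)/SHUT_WR(1)/SHUT_RDWR(2) onto the kernel bit values |
|  |  |  * RCV_SHUTDOWN(1)/SEND_SHUTDOWN(2)/SHUTDOWN_MASK(3) */ |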
|  | 1548 | how++; | 
|  | 1549 |  | 
|  | 1550 | if ((how & ~SHUTDOWN_MASK) || !how) | 
|  | 1551 | return -EINVAL; | 
|  | 1552 |  | 
|  | 1553 | lock_sock(sk); | 
|  | 1554 | switch (sk->sk_state) { | 
|  | 1555 | case IUCV_LISTEN: | 
|  | 1556 | case IUCV_DISCONN: | 
|  | 1557 | case IUCV_CLOSING: | 
|  | 1558 | case IUCV_CLOSED: | 
|  | 1559 | err = -ENOTCONN; | 
|  | 1560 | goto fail; | 
|  | 1561 | default: | 
|  | 1562 | break; | 
|  | 1563 | } | 
|  | 1564 |  | 
|  | 1565 | if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) { | 
|  | 1566 | if (iucv->transport == AF_IUCV_TRANS_IUCV) { | 
|  | 1567 | txmsg.class = 0; | 
|  | 1568 | txmsg.tag = 0; | 
|  | 1569 | err = pr_iucv->message_send(iucv->path, &txmsg, | 
|  | 1570 | IUCV_IPRMDATA, 0, (void *) iprm_shutdown, 8); | 
|  | 1571 | if (err) { | 
|  | 1572 | switch (err) { | 
|  | 1573 | case 1: | 
|  | 1574 | err = -ENOTCONN; | 
|  | 1575 | break; | 
|  | 1576 | case 2: | 
|  | 1577 | err = -ECONNRESET; | 
|  | 1578 | break; | 
|  | 1579 | default: | 
|  | 1580 | err = -ENOTCONN; | 
|  | 1581 | break; | 
|  | 1582 | } | 
|  | 1583 | } | 
|  | 1584 | } else | 
|  | 1585 | iucv_send_ctrl(sk, AF_IUCV_FLAG_SHT); | 
|  | 1586 | } | 
|  | 1587 |  | 
|  | 1588 | sk->sk_shutdown |= how; | 
|  | 1589 | if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) { | 
|  | 1590 | if ((iucv->transport == AF_IUCV_TRANS_IUCV) && | 
|  | 1591 | iucv->path) { | 
|  | 1592 | err = pr_iucv->path_quiesce(iucv->path, NULL); | 
|  | 1593 | if (err) | 
|  | 1594 | err = -ENOTCONN; | 
|  | 1596 | } | 
|  | 1597 | skb_queue_purge(&sk->sk_receive_queue); | 
|  | 1598 | } | 
|  | 1599 |  | 
|  | 1600 | /* Wake up anyone sleeping in poll */ | 
|  | 1601 | sk->sk_state_change(sk); | 
|  | 1602 |  | 
|  | 1603 | fail: | 
|  | 1604 | release_sock(sk); | 
|  | 1605 | return err; | 
|  | 1606 | } | 
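|  |  |  |
|  |  | /* Usage sketch, not taken from this file: half-closing a connected |
|  |  |  * AF_IUCV stream socket from userspace; fd is a hypothetical connected |
|  |  |  * descriptor. SHUT_WR makes the code above send the special |
|  |  |  * iprm_shutdown message (VM transport) or an AF_IUCV_FLAG_SHT frame |
|  |  |  * (HiperSockets) before marking the send direction shut down. |
|  |  |  * |
|  |  |  *	#include <sys/socket.h> |
|  |  |  *	#include <stdio.h> |
|  |  |  * |
|  |  |  *	// fd: a connected AF_IUCV socket (hypothetical) |
|  |  |  *	if (shutdown(fd, SHUT_WR) == -1) |
|  |  |  *		perror("shutdown");	// ENOTCONN unless fully connected |
|  |  |  */ |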
|  | 1607 |  | 
|  | 1608 | static int iucv_sock_release(struct socket *sock) | 
|  | 1609 | { | 
|  | 1610 | struct sock *sk = sock->sk; | 
|  | 1611 | int err = 0; | 
|  | 1612 |  | 
|  | 1613 | if (!sk) | 
|  | 1614 | return 0; | 
|  | 1615 |  | 
|  | 1616 | iucv_sock_close(sk); | 
|  | 1617 |  | 
|  | 1618 | sock_orphan(sk); | 
|  | 1619 | iucv_sock_kill(sk); | 
|  | 1620 | return err; | 
|  | 1621 | } | 
|  | 1622 |  | 
|  | 1623 | /* getsockopt and setsockopt */ | 
|  | 1624 | static int iucv_sock_setsockopt(struct socket *sock, int level, int optname, | 
|  | 1625 | char __user *optval, unsigned int optlen) | 
|  | 1626 | { | 
|  | 1627 | struct sock *sk = sock->sk; | 
|  | 1628 | struct iucv_sock *iucv = iucv_sk(sk); | 
|  | 1629 | int val; | 
|  | 1630 | int rc; | 
|  | 1631 |  | 
|  | 1632 | if (level != SOL_IUCV) | 
|  | 1633 | return -ENOPROTOOPT; | 
|  | 1634 |  | 
|  | 1635 | if (optlen < sizeof(int)) | 
|  | 1636 | return -EINVAL; | 
|  | 1637 |  | 
|  | 1638 | if (get_user(val, (int __user *) optval)) | 
|  | 1639 | return -EFAULT; | 
|  | 1640 |  | 
|  | 1641 | rc = 0; | 
|  | 1642 |  | 
|  | 1643 | lock_sock(sk); | 
|  | 1644 | switch (optname) { | 
|  | 1645 | case SO_IPRMDATA_MSG: | 
|  | 1646 | if (val) | 
|  | 1647 | iucv->flags |= IUCV_IPRMDATA; | 
|  | 1648 | else | 
|  | 1649 | iucv->flags &= ~IUCV_IPRMDATA; | 
|  | 1650 | break; | 
|  | 1651 | case SO_MSGLIMIT: | 
|  | 1652 | switch (sk->sk_state) { | 
|  | 1653 | case IUCV_OPEN: | 
|  | 1654 | case IUCV_BOUND: | 
|  | 1655 | if (val < 1 || val > (u16)(~0)) | 
|  | 1656 | rc = -EINVAL; | 
|  | 1657 | else | 
|  | 1658 | iucv->msglimit = val; | 
|  | 1659 | break; | 
|  | 1660 | default: | 
|  | 1661 | rc = -EINVAL; | 
|  | 1662 | break; | 
|  | 1663 | } | 
|  | 1664 | break; | 
|  | 1665 | default: | 
|  | 1666 | rc = -ENOPROTOOPT; | 
|  | 1667 | break; | 
|  | 1668 | } | 
|  | 1669 | release_sock(sk); | 
|  | 1670 |  | 
|  | 1671 | return rc; | 
|  | 1672 | } | 
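|  |  |  |
|  |  | /* Usage sketch, not taken from this file: raising the message limit |
|  |  |  * before connect(). SO_MSGLIMIT is accepted only in the IUCV_OPEN or |
|  |  |  * IUCV_BOUND state with values 1..65535; AF_IUCV, SOL_IUCV and |
|  |  |  * SO_MSGLIMIT are assumed to come from a copy of the kernel's |
|  |  |  * af_iucv.h, since libc headers may not ship them. |
|  |  |  * |
|  |  |  *	int s = socket(AF_IUCV, SOCK_STREAM, 0); |
|  |  |  *	int limit = 16;		// messages in flight on the path |
|  |  |  * |
|  |  |  *	if (setsockopt(s, SOL_IUCV, SO_MSGLIMIT, &limit, sizeof(limit))) |
|  |  |  *		perror("SO_MSGLIMIT");	// EINVAL once connected |
|  |  |  */ |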
|  | 1673 |  | 
|  | 1674 | static int iucv_sock_getsockopt(struct socket *sock, int level, int optname, | 
|  | 1675 | char __user *optval, int __user *optlen) | 
|  | 1676 | { | 
|  | 1677 | struct sock *sk = sock->sk; | 
|  | 1678 | struct iucv_sock *iucv = iucv_sk(sk); | 
|  | 1679 | unsigned int val; | 
|  | 1680 | int len; | 
|  | 1681 |  | 
|  | 1682 | if (level != SOL_IUCV) | 
|  | 1683 | return -ENOPROTOOPT; | 
|  | 1684 |  | 
|  | 1685 | if (get_user(len, optlen)) | 
|  | 1686 | return -EFAULT; | 
|  | 1687 |  | 
|  | 1688 | if (len < 0) | 
|  | 1689 | return -EINVAL; | 
|  | 1690 |  | 
|  | 1691 | len = min_t(unsigned int, len, sizeof(int)); | 
|  | 1692 |  | 
|  | 1693 | switch (optname) { | 
|  | 1694 | case SO_IPRMDATA_MSG: | 
|  | 1695 | val = (iucv->flags & IUCV_IPRMDATA) ? 1 : 0; | 
|  | 1696 | break; | 
|  | 1697 | case SO_MSGLIMIT: | 
|  | 1698 | lock_sock(sk); | 
|  | 1699 | val = (iucv->path != NULL) ? iucv->path->msglim	/* connected */ | 
|  | 1700 | : iucv->msglimit;	/* default */ | 
|  | 1701 | release_sock(sk); | 
|  | 1702 | break; | 
|  | 1703 | case SO_MSGSIZE: | 
|  | 1704 | if (sk->sk_state == IUCV_OPEN) | 
|  | 1705 | return -EBADFD; | 
|  | 1706 | val = (iucv->hs_dev) ? iucv->hs_dev->mtu - | 
|  | 1707 | sizeof(struct af_iucv_trans_hdr) - ETH_HLEN : | 
|  | 1708 | 0x7fffffff; | 
|  | 1709 | break; | 
|  | 1710 | default: | 
|  | 1711 | return -ENOPROTOOPT; | 
|  | 1712 | } | 
|  | 1713 |  | 
|  | 1714 | if (put_user(len, optlen)) | 
|  | 1715 | return -EFAULT; | 
|  | 1716 | if (copy_to_user(optval, &val, len)) | 
|  | 1717 | return -EFAULT; | 
|  | 1718 |  | 
|  | 1719 | return 0; | 
|  | 1720 | } | 
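|  |  |  |
|  |  | /* Usage sketch, not taken from this file: querying the maximum message |
|  |  |  * size on socket s once it is past IUCV_OPEN. Per the code above, |
|  |  |  * HiperSockets sockets report the device MTU minus the transport and |
|  |  |  * Ethernet headers; VM IUCV sockets report 0x7fffffff. |
|  |  |  * |
|  |  |  *	unsigned int msgsize; |
|  |  |  *	socklen_t len = sizeof(msgsize); |
|  |  |  * |
|  |  |  *	if (getsockopt(s, SOL_IUCV, SO_MSGSIZE, &msgsize, &len)) |
|  |  |  *		perror("SO_MSGSIZE");	// EBADFD while still in IUCV_OPEN |
|  |  |  */ |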
|  | 1721 |  | 
|  | 1722 |  | 
|  | 1723 | /* Callback wrappers - called from iucv base support */ | 
|  | 1724 | static int iucv_callback_connreq(struct iucv_path *path, | 
|  | 1725 | u8 ipvmid[8], u8 ipuser[16]) | 
|  | 1726 | { | 
|  | 1727 | unsigned char user_data[16]; | 
|  | 1728 | unsigned char nuser_data[16]; | 
|  | 1729 | unsigned char src_name[8]; | 
|  | 1730 | struct sock *sk, *nsk; | 
|  | 1731 | struct iucv_sock *iucv, *niucv; | 
|  | 1732 | int err; | 
|  | 1733 |  | 
|  | 1734 | memcpy(src_name, ipuser, 8); | 
|  | 1735 | EBCASC(src_name, 8); | 
|  | 1736 | /* Find out if this path belongs to af_iucv. */ | 
|  | 1737 | read_lock(&iucv_sk_list.lock); | 
|  | 1738 | iucv = NULL; | 
|  | 1739 | sk = NULL; | 
|  | 1740 | sk_for_each(sk, &iucv_sk_list.head) | 
|  | 1741 | if (sk->sk_state == IUCV_LISTEN && | 
|  | 1742 | !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) { | 
|  | 1743 | /* | 
|  | 1744 | * Found a listening socket with | 
|  | 1745 | * src_name == ipuser[0-7]. | 
|  | 1746 | */ | 
|  | 1747 | iucv = iucv_sk(sk); | 
|  | 1748 | break; | 
|  | 1749 | } | 
|  | 1750 | read_unlock(&iucv_sk_list.lock); | 
|  | 1751 | if (!iucv) | 
|  | 1752 | /* No socket found, not one of our paths. */ | 
|  | 1753 | return -EINVAL; | 
|  | 1754 |  | 
|  | 1755 | bh_lock_sock(sk); | 
|  | 1756 |  | 
|  | 1757 | /* Check if parent socket is listening */ | 
|  | 1758 | low_nmcpy(user_data, iucv->src_name); | 
|  | 1759 | high_nmcpy(user_data, iucv->dst_name); | 
|  | 1760 | ASCEBC(user_data, sizeof(user_data)); | 
|  | 1761 | if (sk->sk_state != IUCV_LISTEN) { | 
|  | 1762 | err = pr_iucv->path_sever(path, user_data); | 
|  | 1763 | iucv_path_free(path); | 
|  | 1764 | goto fail; | 
|  | 1765 | } | 
|  | 1766 |  | 
|  | 1767 | /* Check whether the accept backlog is full */ |
|  | 1768 | if (sk_acceptq_is_full(sk)) { | 
|  | 1769 | err = pr_iucv->path_sever(path, user_data); | 
|  | 1770 | iucv_path_free(path); | 
|  | 1771 | goto fail; | 
|  | 1772 | } | 
|  | 1773 |  | 
|  | 1774 | /* Create the new socket */ | 
|  | 1775 | nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC, 0); | 
|  | 1776 | if (!nsk) { | 
|  | 1777 | err = pr_iucv->path_sever(path, user_data); | 
|  | 1778 | iucv_path_free(path); | 
|  | 1779 | goto fail; | 
|  | 1780 | } | 
|  | 1781 |  | 
|  | 1782 | niucv = iucv_sk(nsk); | 
|  | 1783 | iucv_sock_init(nsk, sk); | 
|  | 1784 |  | 
|  | 1785 | /* Set the new iucv_sock */ | 
|  | 1786 | memcpy(niucv->dst_name, ipuser + 8, 8); | 
|  | 1787 | EBCASC(niucv->dst_name, 8); | 
|  | 1788 | memcpy(niucv->dst_user_id, ipvmid, 8); | 
|  | 1789 | memcpy(niucv->src_name, iucv->src_name, 8); | 
|  | 1790 | memcpy(niucv->src_user_id, iucv->src_user_id, 8); | 
|  | 1791 | niucv->path = path; | 
|  | 1792 |  | 
|  | 1793 | /* Prepare the user data for the path_accept call */ |
|  | 1794 | high_nmcpy(nuser_data, ipuser + 8); | 
|  | 1795 | memcpy(nuser_data + 8, niucv->src_name, 8); | 
|  | 1796 | ASCEBC(nuser_data + 8, 8); | 
|  | 1797 |  | 
|  | 1798 | /* set message limit for path based on msglimit of accepting socket */ | 
|  | 1799 | niucv->msglimit = iucv->msglimit; | 
|  | 1800 | path->msglim = iucv->msglimit; | 
|  | 1801 | err = pr_iucv->path_accept(path, &af_iucv_handler, nuser_data, nsk); | 
|  | 1802 | if (err) { | 
|  | 1803 | iucv_sever_path(nsk, 1); | 
|  | 1804 | iucv_sock_kill(nsk); | 
|  | 1805 | goto fail; | 
|  | 1806 | } | 
|  | 1807 |  | 
|  | 1808 | iucv_accept_enqueue(sk, nsk); | 
|  | 1809 |  | 
|  | 1810 | /* Wake up accept */ | 
|  | 1811 | nsk->sk_state = IUCV_CONNECTED; | 
|  | 1812 | sk->sk_data_ready(sk); | 
|  | 1813 | err = 0; | 
|  | 1814 | fail: | 
|  | 1815 | bh_unlock_sock(sk); | 
|  | 1816 | return 0; | 
|  | 1817 | } | 
|  | 1818 |  | 
|  | 1819 | static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16]) | 
|  | 1820 | { | 
|  | 1821 | struct sock *sk = path->private; | 
|  | 1822 |  | 
|  | 1823 | sk->sk_state = IUCV_CONNECTED; | 
|  | 1824 | sk->sk_state_change(sk); | 
|  | 1825 | } | 
|  | 1826 |  | 
|  | 1827 | static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg) | 
|  | 1828 | { | 
|  | 1829 | struct sock *sk = path->private; | 
|  | 1830 | struct iucv_sock *iucv = iucv_sk(sk); | 
|  | 1831 | struct sk_buff *skb; | 
|  | 1832 | struct sock_msg_q *save_msg; | 
|  | 1833 | int len; | 
|  | 1834 |  | 
|  | 1835 | if (sk->sk_shutdown & RCV_SHUTDOWN) { | 
|  | 1836 | pr_iucv->message_reject(path, msg); | 
|  | 1837 | return; | 
|  | 1838 | } | 
|  | 1839 |  | 
|  | 1840 | spin_lock(&iucv->message_q.lock); | 
|  | 1841 |  | 
|  | 1842 | if (!list_empty(&iucv->message_q.list) || | 
|  | 1843 | !skb_queue_empty(&iucv->backlog_skb_q)) | 
|  | 1844 | goto save_message; | 
|  | 1845 |  | 
|  | 1846 | len = atomic_read(&sk->sk_rmem_alloc); | 
|  | 1847 | len += SKB_TRUESIZE(iucv_msg_length(msg)); | 
|  | 1848 | if (len > sk->sk_rcvbuf) | 
|  | 1849 | goto save_message; | 
|  | 1850 |  | 
|  | 1851 | skb = alloc_iucv_recv_skb(iucv_msg_length(msg)); | 
|  | 1852 | if (!skb) | 
|  | 1853 | goto save_message; | 
|  | 1854 |  | 
|  | 1855 | iucv_process_message(sk, skb, path, msg); | 
|  | 1856 | goto out_unlock; | 
|  | 1857 |  | 
|  | 1858 | save_message: | 
|  | 1859 | save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA); | 
|  | 1860 | if (!save_msg) | 
|  | 1861 | goto out_unlock; | 
|  | 1862 | save_msg->path = path; | 
|  | 1863 | save_msg->msg = *msg; | 
|  | 1864 |  | 
|  | 1865 | list_add_tail(&save_msg->list, &iucv->message_q.list); | 
|  | 1866 |  | 
|  | 1867 | out_unlock: | 
|  | 1868 | spin_unlock(&iucv->message_q.lock); | 
|  | 1869 | } | 
|  | 1870 |  | 
|  | 1871 | static void iucv_callback_txdone(struct iucv_path *path, | 
|  | 1872 | struct iucv_message *msg) | 
|  | 1873 | { | 
|  | 1874 | struct sock *sk = path->private; | 
|  | 1875 | struct sk_buff *this = NULL; | 
|  | 1876 | struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q; | 
|  | 1877 | struct sk_buff *list_skb = list->next; | 
|  | 1878 | unsigned long flags; | 
|  | 1879 |  | 
|  | 1880 | bh_lock_sock(sk); | 
|  | 1881 | if (!skb_queue_empty(list)) { | 
|  | 1882 | spin_lock_irqsave(&list->lock, flags); | 
|  | 1883 |  | 
|  | 1884 | while (list_skb != (struct sk_buff *)list) { | 
|  | 1885 | if (msg->tag == IUCV_SKB_CB(list_skb)->tag) { | 
|  | 1886 | this = list_skb; | 
|  | 1887 | break; | 
|  | 1888 | } | 
|  | 1889 | list_skb = list_skb->next; | 
|  | 1890 | } | 
|  | 1891 | if (this) | 
|  | 1892 | __skb_unlink(this, list); | 
|  | 1893 |  | 
|  | 1894 | spin_unlock_irqrestore(&list->lock, flags); | 
|  | 1895 |  | 
|  | 1896 | if (this) { | 
|  | 1897 | kfree_skb(this); | 
|  | 1898 | /* wake up any process waiting to send */ |
|  | 1899 | iucv_sock_wake_msglim(sk); | 
|  | 1900 | } | 
|  | 1901 | } | 
|  | 1902 |  | 
|  | 1903 | if (sk->sk_state == IUCV_CLOSING) { | 
|  | 1904 | if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) { | 
|  | 1905 | sk->sk_state = IUCV_CLOSED; | 
|  | 1906 | sk->sk_state_change(sk); | 
|  | 1907 | } | 
|  | 1908 | } | 
|  | 1909 | bh_unlock_sock(sk); | 
|  | 1910 |  | 
|  | 1911 | } | 
|  | 1912 |  | 
|  | 1913 | static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16]) | 
|  | 1914 | { | 
|  | 1915 | struct sock *sk = path->private; | 
|  | 1916 |  | 
|  | 1917 | if (sk->sk_state == IUCV_CLOSED) | 
|  | 1918 | return; | 
|  | 1919 |  | 
|  | 1920 | bh_lock_sock(sk); | 
|  | 1921 | iucv_sever_path(sk, 1); | 
|  | 1922 | sk->sk_state = IUCV_DISCONN; | 
|  | 1923 |  | 
|  | 1924 | sk->sk_state_change(sk); | 
|  | 1925 | bh_unlock_sock(sk); | 
|  | 1926 | } | 
|  | 1927 |  | 
|  | 1928 | /* called if the other communication side shuts down its RECV direction; | 
|  | 1929 | * in turn, the callback sets SEND_SHUTDOWN to disable sending of data. | 
|  | 1930 | */ | 
|  | 1931 | static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16]) | 
|  | 1932 | { | 
|  | 1933 | struct sock *sk = path->private; | 
|  | 1934 |  | 
|  | 1935 | bh_lock_sock(sk); | 
|  | 1936 | if (sk->sk_state != IUCV_CLOSED) { | 
|  | 1937 | sk->sk_shutdown |= SEND_SHUTDOWN; | 
|  | 1938 | sk->sk_state_change(sk); | 
|  | 1939 | } | 
|  | 1940 | bh_unlock_sock(sk); | 
|  | 1941 | } | 
|  | 1942 |  | 
|  | 1943 | /***************** HiperSockets transport callbacks ********************/ | 
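|  |  | /* Reverse the addressing of a received frame so the skb can be sent |
|  |  |  * straight back as a reply: convert the header fields (EBCASC'ed in |
|  |  |  * afiucv_hs_rcv) back to EBCDIC, swap source and destination, and |
|  |  |  * push a zeroed Ethernet header for the retransmit. */ |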
|  | 1944 | static void afiucv_swap_src_dest(struct sk_buff *skb) | 
|  | 1945 | { | 
|  | 1946 | struct af_iucv_trans_hdr *trans_hdr = | 
|  | 1947 | (struct af_iucv_trans_hdr *)skb->data; | 
|  | 1948 | char tmpID[8]; | 
|  | 1949 | char tmpName[8]; | 
|  | 1950 |  | 
|  | 1951 | ASCEBC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID)); | 
|  | 1952 | ASCEBC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName)); | 
|  | 1953 | ASCEBC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID)); | 
|  | 1954 | ASCEBC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName)); | 
|  | 1955 | memcpy(tmpID, trans_hdr->srcUserID, 8); | 
|  | 1956 | memcpy(tmpName, trans_hdr->srcAppName, 8); | 
|  | 1957 | memcpy(trans_hdr->srcUserID, trans_hdr->destUserID, 8); | 
|  | 1958 | memcpy(trans_hdr->srcAppName, trans_hdr->destAppName, 8); | 
|  | 1959 | memcpy(trans_hdr->destUserID, tmpID, 8); | 
|  | 1960 | memcpy(trans_hdr->destAppName, tmpName, 8); | 
|  | 1961 | skb_push(skb, ETH_HLEN); | 
|  | 1962 | memset(skb->data, 0, ETH_HLEN); | 
|  | 1963 | } | 
|  | 1964 |  | 
|  | 1965 | /** | 
|  | 1966 | * afiucv_hs_callback_syn - react to a received SYN |
|  | 1967 | **/ | 
|  | 1968 | static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb) | 
|  | 1969 | { | 
|  | 1970 | struct sock *nsk; | 
|  | 1971 | struct iucv_sock *iucv, *niucv; | 
|  | 1972 | struct af_iucv_trans_hdr *trans_hdr; | 
|  | 1973 | int err; | 
|  | 1974 |  | 
|  | 1975 | iucv = iucv_sk(sk); | 
|  | 1976 | trans_hdr = (struct af_iucv_trans_hdr *)skb->data; | 
|  | 1977 | if (!iucv) { | 
|  | 1978 | /* no sock - connection refused */ | 
|  | 1979 | afiucv_swap_src_dest(skb); | 
|  | 1980 | trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN; | 
|  | 1981 | err = dev_queue_xmit(skb); | 
|  | 1982 | goto out; | 
|  | 1983 | } | 
|  | 1984 |  | 
|  | 1985 | nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC, 0); | 
|  | 1986 | bh_lock_sock(sk); | 
|  | 1987 | if ((sk->sk_state != IUCV_LISTEN) || | 
|  | 1988 | sk_acceptq_is_full(sk) || | 
|  | 1989 | !nsk) { | 
|  | 1990 | /* error on server socket - connection refused */ | 
|  | 1991 | afiucv_swap_src_dest(skb); | 
|  | 1992 | trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN; | 
|  | 1993 | err = dev_queue_xmit(skb); | 
|  | 1994 | iucv_sock_kill(nsk); | 
|  | 1995 | bh_unlock_sock(sk); | 
|  | 1996 | goto out; | 
|  | 1997 | } | 
|  | 1998 |  | 
|  | 1999 | niucv = iucv_sk(nsk); | 
|  | 2000 | iucv_sock_init(nsk, sk); | 
|  | 2001 | niucv->transport = AF_IUCV_TRANS_HIPER; | 
|  | 2002 | niucv->msglimit = iucv->msglimit; | 
|  | 2003 | if (!trans_hdr->window) | 
|  | 2004 | niucv->msglimit_peer = IUCV_HIPER_MSGLIM_DEFAULT; | 
|  | 2005 | else | 
|  | 2006 | niucv->msglimit_peer = trans_hdr->window; | 
|  | 2007 | memcpy(niucv->dst_name, trans_hdr->srcAppName, 8); | 
|  | 2008 | memcpy(niucv->dst_user_id, trans_hdr->srcUserID, 8); | 
|  | 2009 | memcpy(niucv->src_name, iucv->src_name, 8); | 
|  | 2010 | memcpy(niucv->src_user_id, iucv->src_user_id, 8); | 
|  | 2011 | nsk->sk_bound_dev_if = sk->sk_bound_dev_if; | 
|  | 2012 | niucv->hs_dev = iucv->hs_dev; | 
|  | 2013 | dev_hold(niucv->hs_dev); | 
|  | 2014 | afiucv_swap_src_dest(skb); | 
|  | 2015 | trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK; | 
|  | 2016 | trans_hdr->window = niucv->msglimit; | 
|  | 2017 | /* if the receiver acks the transmission, the connection is established */ |
|  | 2018 | err = dev_queue_xmit(skb); | 
|  | 2019 | if (!err) { | 
|  | 2020 | iucv_accept_enqueue(sk, nsk); | 
|  | 2021 | nsk->sk_state = IUCV_CONNECTED; | 
|  | 2022 | sk->sk_data_ready(sk); | 
|  | 2023 | } else | 
|  | 2024 | iucv_sock_kill(nsk); | 
|  | 2025 | bh_unlock_sock(sk); | 
|  | 2026 |  | 
|  | 2027 | out: | 
|  | 2028 | return NET_RX_SUCCESS; | 
|  | 2029 | } | 
|  | 2030 |  | 
|  | 2031 | /** | 
|  | 2032 | * afiucv_hs_callback_synack() - react to a received SYN-ACK |
|  | 2033 | **/ | 
|  | 2034 | static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb) | 
|  | 2035 | { | 
|  | 2036 | struct iucv_sock *iucv = iucv_sk(sk); | 
|  | 2037 | struct af_iucv_trans_hdr *trans_hdr = | 
|  | 2038 | (struct af_iucv_trans_hdr *)skb->data; | 
|  | 2039 |  | 
|  | 2040 | if (!iucv) | 
|  | 2041 | goto out; | 
|  | 2042 | if (sk->sk_state != IUCV_BOUND) | 
|  | 2043 | goto out; | 
|  | 2044 | bh_lock_sock(sk); | 
|  | 2045 | iucv->msglimit_peer = trans_hdr->window; | 
|  | 2046 | sk->sk_state = IUCV_CONNECTED; | 
|  | 2047 | sk->sk_state_change(sk); | 
|  | 2048 | bh_unlock_sock(sk); | 
|  | 2049 | out: | 
|  | 2050 | kfree_skb(skb); | 
|  | 2051 | return NET_RX_SUCCESS; | 
|  | 2052 | } | 
|  | 2053 |  | 
|  | 2054 | /** | 
|  | 2055 | * afiucv_hs_callback_synfin() - react to a received SYN|FIN |
|  | 2056 | **/ | 
|  | 2057 | static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb) | 
|  | 2058 | { | 
|  | 2059 | struct iucv_sock *iucv = iucv_sk(sk); | 
|  | 2060 |  | 
|  | 2061 | if (!iucv) | 
|  | 2062 | goto out; | 
|  | 2063 | if (sk->sk_state != IUCV_BOUND) | 
|  | 2064 | goto out; | 
|  | 2065 | bh_lock_sock(sk); | 
|  | 2066 | sk->sk_state = IUCV_DISCONN; | 
|  | 2067 | sk->sk_state_change(sk); | 
|  | 2068 | bh_unlock_sock(sk); | 
|  | 2069 | out: | 
|  | 2070 | kfree_skb(skb); | 
|  | 2071 | return NET_RX_SUCCESS; | 
|  | 2072 | } | 
|  | 2073 |  | 
|  | 2074 | /** | 
|  | 2075 | * afiucv_hs_callback_fin() - react to a received FIN |
|  | 2076 | **/ | 
|  | 2077 | static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb) | 
|  | 2078 | { | 
|  | 2079 | struct iucv_sock *iucv = iucv_sk(sk); | 
|  | 2080 |  | 
|  | 2081 | /* other end of connection closed */ | 
|  | 2082 | if (!iucv) | 
|  | 2083 | goto out; | 
|  | 2084 | bh_lock_sock(sk); | 
|  | 2085 | if (sk->sk_state == IUCV_CONNECTED) { | 
|  | 2086 | sk->sk_state = IUCV_DISCONN; | 
|  | 2087 | sk->sk_state_change(sk); | 
|  | 2088 | } | 
|  | 2089 | bh_unlock_sock(sk); | 
|  | 2090 | out: | 
|  | 2091 | kfree_skb(skb); | 
|  | 2092 | return NET_RX_SUCCESS; | 
|  | 2093 | } | 
|  | 2094 |  | 
|  | 2095 | /** | 
|  | 2096 | * afiucv_hs_callback_win() - react to a received WIN |
|  | 2097 | **/ | 
|  | 2098 | static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb) | 
|  | 2099 | { | 
|  | 2100 | struct iucv_sock *iucv = iucv_sk(sk); | 
|  | 2101 | struct af_iucv_trans_hdr *trans_hdr = | 
|  | 2102 | (struct af_iucv_trans_hdr *)skb->data; | 
|  | 2103 |  | 
|  | 2104 | if (!iucv) | 
|  | 2105 | return NET_RX_SUCCESS; | 
|  | 2106 |  | 
|  | 2107 | if (sk->sk_state != IUCV_CONNECTED) | 
|  | 2108 | return NET_RX_SUCCESS; | 
|  | 2109 |  | 
|  | 2110 | atomic_sub(trans_hdr->window, &iucv->msg_sent); | 
|  | 2111 | iucv_sock_wake_msglim(sk); | 
|  | 2112 | return NET_RX_SUCCESS; | 
|  | 2113 | } | 
|  | 2114 |  | 
|  | 2115 | /** | 
|  | 2116 | * afiucv_hs_callback_rx() - react to received data |
|  | 2117 | **/ | 
|  | 2118 | static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb) | 
|  | 2119 | { | 
|  | 2120 | struct iucv_sock *iucv = iucv_sk(sk); | 
|  | 2121 |  | 
|  | 2122 | if (!iucv) { | 
|  | 2123 | kfree_skb(skb); | 
|  | 2124 | return NET_RX_SUCCESS; | 
|  | 2125 | } | 
|  | 2126 |  | 
|  | 2127 | if (sk->sk_state != IUCV_CONNECTED) { | 
|  | 2128 | kfree_skb(skb); | 
|  | 2129 | return NET_RX_SUCCESS; | 
|  | 2130 | } | 
|  | 2131 |  | 
|  | 2132 | if (sk->sk_shutdown & RCV_SHUTDOWN) { | 
|  | 2133 | kfree_skb(skb); | 
|  | 2134 | return NET_RX_SUCCESS; | 
|  | 2135 | } | 
|  | 2136 |  | 
|  | 2137 | /* strip the transport header and initialize the receive state in the skb cb */ |
|  | 2138 | skb_pull(skb, sizeof(struct af_iucv_trans_hdr)); | 
|  | 2139 | skb_reset_transport_header(skb); | 
|  | 2140 | skb_reset_network_header(skb); | 
|  | 2141 | IUCV_SKB_CB(skb)->offset = 0; | 
|  | 2142 | if (sk_filter(sk, skb)) { | 
|  | 2143 | atomic_inc(&sk->sk_drops);	/* skb rejected by filter */ | 
|  | 2144 | kfree_skb(skb); | 
|  | 2145 | return NET_RX_SUCCESS; | 
|  | 2146 | } | 
|  | 2147 |  | 
|  | 2148 | spin_lock(&iucv->message_q.lock); | 
|  | 2149 | if (skb_queue_empty(&iucv->backlog_skb_q)) { | 
|  | 2150 | if (__sock_queue_rcv_skb(sk, skb)) | 
|  | 2151 | /* handle rcv queue full */ | 
|  | 2152 | skb_queue_tail(&iucv->backlog_skb_q, skb); | 
|  | 2153 | } else | 
|  | 2154 | skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb); | 
|  | 2155 | spin_unlock(&iucv->message_q.lock); | 
|  | 2156 | return NET_RX_SUCCESS; | 
|  | 2157 | } | 
|  | 2158 |  | 
|  | 2159 | /** | 
|  | 2160 | * afiucv_hs_rcv() - entry point for data arriving through the |
|  | 2161 | *                   HiperSockets transport; |
|  | 2162 | *                   called from the netif RX softirq |
|  | 2163 | **/ | 
|  | 2164 | static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev, | 
|  | 2165 | struct packet_type *pt, struct net_device *orig_dev) | 
|  | 2166 | { | 
|  | 2167 | struct sock *sk; | 
|  | 2168 | struct iucv_sock *iucv; | 
|  | 2169 | struct af_iucv_trans_hdr *trans_hdr; | 
|  | 2170 | int err = NET_RX_SUCCESS; | 
|  | 2171 | char nullstring[8]; | 
|  | 2172 |  | 
|  | 2173 | if (skb->len < (ETH_HLEN + sizeof(struct af_iucv_trans_hdr))) { | 
|  | 2174 | WARN_ONCE(1, "AF_IUCV too short skb, len=%d, min=%d", | 
|  | 2175 | (int)skb->len, | 
|  | 2176 | (int)(ETH_HLEN + sizeof(struct af_iucv_trans_hdr))); | 
|  | 2177 | kfree_skb(skb); | 
|  | 2178 | return NET_RX_SUCCESS; | 
|  | 2179 | } | 
|  | 2180 | if (skb_headlen(skb) < (ETH_HLEN + sizeof(struct af_iucv_trans_hdr))) | 
|  | 2181 | if (skb_linearize(skb)) { | 
|  | 2182 | WARN_ONCE(1, "AF_IUCV skb_linearize failed, len=%d", | 
|  | 2183 | (int)skb->len); | 
|  | 2184 | kfree_skb(skb); | 
|  | 2185 | return NET_RX_SUCCESS; | 
|  | 2186 | } | 
|  | 2187 | skb_pull(skb, ETH_HLEN); | 
|  | 2188 | trans_hdr = (struct af_iucv_trans_hdr *)skb->data; | 
|  | 2189 | EBCASC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName)); | 
|  | 2190 | EBCASC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID)); | 
|  | 2191 | EBCASC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName)); | 
|  | 2192 | EBCASC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID)); | 
|  | 2193 | memset(nullstring, 0, sizeof(nullstring)); | 
|  | 2194 | iucv = NULL; | 
|  | 2195 | sk = NULL; | 
|  | 2196 | read_lock(&iucv_sk_list.lock); | 
|  | 2197 | sk_for_each(sk, &iucv_sk_list.head) { | 
|  | 2198 | if (trans_hdr->flags == AF_IUCV_FLAG_SYN) { | 
|  | 2199 | if ((!memcmp(&iucv_sk(sk)->src_name, | 
|  | 2200 | trans_hdr->destAppName, 8)) && | 
|  | 2201 | (!memcmp(&iucv_sk(sk)->src_user_id, | 
|  | 2202 | trans_hdr->destUserID, 8)) && | 
|  | 2203 | (!memcmp(&iucv_sk(sk)->dst_name, nullstring, 8)) && | 
|  | 2204 | (!memcmp(&iucv_sk(sk)->dst_user_id, | 
|  | 2205 | nullstring, 8))) { | 
|  | 2206 | iucv = iucv_sk(sk); | 
|  | 2207 | break; | 
|  | 2208 | } | 
|  | 2209 | } else { | 
|  | 2210 | if ((!memcmp(&iucv_sk(sk)->src_name, | 
|  | 2211 | trans_hdr->destAppName, 8)) && | 
|  | 2212 | (!memcmp(&iucv_sk(sk)->src_user_id, | 
|  | 2213 | trans_hdr->destUserID, 8)) && | 
|  | 2214 | (!memcmp(&iucv_sk(sk)->dst_name, | 
|  | 2215 | trans_hdr->srcAppName, 8)) && | 
|  | 2216 | (!memcmp(&iucv_sk(sk)->dst_user_id, | 
|  | 2217 | trans_hdr->srcUserID, 8))) { | 
|  | 2218 | iucv = iucv_sk(sk); | 
|  | 2219 | break; | 
|  | 2220 | } | 
|  | 2221 | } | 
|  | 2222 | } | 
|  | 2223 | read_unlock(&iucv_sk_list.lock); | 
|  | 2224 | if (!iucv) | 
|  | 2225 | sk = NULL; | 
|  | 2226 |  | 
|  | 2227 | /* No matching socket found. Open question: how should we reply |
|  | 2228 | when there is no socket? |
|  | 2229 | 1) send without a socket and skip send rc checking? |
|  | 2230 | 2) introduce a default socket to handle these cases? |
|  | 2231 |  |
|  | 2232 | SYN -> send SYN|ACK in the good case, SYN|FIN in the bad case |
|  | 2233 | data -> send FIN |
|  | 2234 | SYN|ACK, SYN|FIN, FIN -> no action? */ |
|  | 2235 |  | 
|  | 2236 | switch (trans_hdr->flags) { | 
|  | 2237 | case AF_IUCV_FLAG_SYN: | 
|  | 2238 | /* connect request */ | 
|  | 2239 | err = afiucv_hs_callback_syn(sk, skb); | 
|  | 2240 | break; | 
|  | 2241 | case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK): | 
|  | 2242 | /* connect request confirmed */ | 
|  | 2243 | err = afiucv_hs_callback_synack(sk, skb); | 
|  | 2244 | break; | 
|  | 2245 | case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN): | 
|  | 2246 | /* connect request refused */ | 
|  | 2247 | err = afiucv_hs_callback_synfin(sk, skb); | 
|  | 2248 | break; | 
|  | 2249 | case (AF_IUCV_FLAG_FIN): | 
|  | 2250 | /* close request */ | 
|  | 2251 | err = afiucv_hs_callback_fin(sk, skb); | 
|  | 2252 | break; | 
|  | 2253 | case (AF_IUCV_FLAG_WIN): | 
|  | 2254 | err = afiucv_hs_callback_win(sk, skb); | 
|  | 2255 | if (skb->len == sizeof(struct af_iucv_trans_hdr)) { | 
|  | 2256 | kfree_skb(skb); | 
|  | 2257 | break; | 
|  | 2258 | } | 
|  | 2259 | /* fall through and receive non-zero length data */ | 
|  | 2260 | case (AF_IUCV_FLAG_SHT): | 
|  | 2261 | /* shutdown request */ | 
|  | 2262 | /* fall through and receive zero length data */ | 
|  | 2263 | case 0: | 
|  | 2264 | /* plain data frame */ | 
|  | 2265 | IUCV_SKB_CB(skb)->class = trans_hdr->iucv_hdr.class; | 
|  | 2266 | err = afiucv_hs_callback_rx(sk, skb); | 
|  | 2267 | break; | 
|  | 2268 | default: | 
|  | 2269 | kfree_skb(skb); | 
|  | 2270 | } | 
|  | 2271 |  | 
|  | 2272 | return err; | 
|  | 2273 | } | 
|  | 2274 |  | 
|  | 2275 | /** | 
|  | 2276 | * afiucv_hs_callback_txnotify() - handle send notifications from the |
|  | 2277 | *                                 HiperSockets transport |
|  | 2278 | **/ | 
|  | 2279 | static void afiucv_hs_callback_txnotify(struct sk_buff *skb, | 
|  | 2280 | enum iucv_tx_notify n) | 
|  | 2281 | { | 
|  | 2282 | struct sock *isk = skb->sk; | 
|  | 2283 | struct sock *sk = NULL; | 
|  | 2284 | struct iucv_sock *iucv = NULL; | 
|  | 2285 | struct sk_buff_head *list; | 
|  | 2286 | struct sk_buff *list_skb; | 
|  | 2287 | struct sk_buff *nskb; | 
|  | 2288 | unsigned long flags; | 
|  | 2289 |  | 
|  | 2290 | read_lock_irqsave(&iucv_sk_list.lock, flags); | 
|  | 2291 | sk_for_each(sk, &iucv_sk_list.head) | 
|  | 2292 | if (sk == isk) { | 
|  | 2293 | iucv = iucv_sk(sk); | 
|  | 2294 | break; | 
|  | 2295 | } | 
|  | 2296 | read_unlock_irqrestore(&iucv_sk_list.lock, flags); | 
|  | 2297 |  | 
|  | 2298 | if (!iucv || sock_flag(sk, SOCK_ZAPPED)) | 
|  | 2299 | return; | 
|  | 2300 |  | 
|  | 2301 | list = &iucv->send_skb_q; | 
|  | 2302 | spin_lock_irqsave(&list->lock, flags); | 
|  | 2303 | if (skb_queue_empty(list)) | 
|  | 2304 | goto out_unlock; | 
|  | 2305 | list_skb = list->next; | 
|  | 2306 | nskb = list_skb->next; | 
|  | 2307 | while (list_skb != (struct sk_buff *)list) { | 
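|  |  | /* the queued entry is a clone of the transmitted skb; clones share |
|  |  |  * one data buffer and hence one skb_shared_info, so comparing |
|  |  |  * skb_shinfo() pointers identifies the matching send */ |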
|  | 2308 | if (skb_shinfo(list_skb) == skb_shinfo(skb)) { | 
|  | 2309 | switch (n) { | 
|  | 2310 | case TX_NOTIFY_OK: | 
|  | 2311 | __skb_unlink(list_skb, list); | 
|  | 2312 | kfree_skb(list_skb); | 
|  | 2313 | iucv_sock_wake_msglim(sk); | 
|  | 2314 | break; | 
|  | 2315 | case TX_NOTIFY_PENDING: | 
|  | 2316 | atomic_inc(&iucv->pendings); | 
|  | 2317 | break; | 
|  | 2318 | case TX_NOTIFY_DELAYED_OK: | 
|  | 2319 | __skb_unlink(list_skb, list); | 
|  | 2320 | atomic_dec(&iucv->pendings); | 
|  | 2321 | if (atomic_read(&iucv->pendings) <= 0) | 
|  | 2322 | iucv_sock_wake_msglim(sk); | 
|  | 2323 | kfree_skb(list_skb); | 
|  | 2324 | break; | 
|  | 2325 | case TX_NOTIFY_UNREACHABLE: | 
|  | 2326 | case TX_NOTIFY_DELAYED_UNREACHABLE: | 
|  | 2327 | case TX_NOTIFY_TPQFULL: /* not yet used */ | 
|  | 2328 | case TX_NOTIFY_GENERALERROR: | 
|  | 2329 | case TX_NOTIFY_DELAYED_GENERALERROR: | 
|  | 2330 | __skb_unlink(list_skb, list); | 
|  | 2331 | kfree_skb(list_skb); | 
|  | 2332 | if (sk->sk_state == IUCV_CONNECTED) { | 
|  | 2333 | sk->sk_state = IUCV_DISCONN; | 
|  | 2334 | sk->sk_state_change(sk); | 
|  | 2335 | } | 
|  | 2336 | break; | 
|  | 2337 | } | 
|  | 2338 | break; | 
|  | 2339 | } | 
|  | 2340 | list_skb = nskb; | 
|  | 2341 | nskb = nskb->next; | 
|  | 2342 | } | 
|  | 2343 | out_unlock: | 
|  | 2344 | spin_unlock_irqrestore(&list->lock, flags); | 
|  | 2345 |  | 
|  | 2346 | if (sk->sk_state == IUCV_CLOSING) { | 
|  | 2347 | if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) { | 
|  | 2348 | sk->sk_state = IUCV_CLOSED; | 
|  | 2349 | sk->sk_state_change(sk); | 
|  | 2350 | } | 
|  | 2351 | } | 
|  | 2352 |  | 
|  | 2353 | } | 
|  | 2354 |  | 
|  | 2355 | /* | 
|  | 2356 | * afiucv_netdev_event: handle netdev notifier chain events | 
|  | 2357 | */ | 
|  | 2358 | static int afiucv_netdev_event(struct notifier_block *this, | 
|  | 2359 | unsigned long event, void *ptr) | 
|  | 2360 | { | 
|  | 2361 | struct net_device *event_dev = netdev_notifier_info_to_dev(ptr); | 
|  | 2362 | struct sock *sk; | 
|  | 2363 | struct iucv_sock *iucv; | 
|  | 2364 |  | 
|  | 2365 | switch (event) { | 
|  | 2366 | case NETDEV_REBOOT: | 
|  | 2367 | case NETDEV_GOING_DOWN: | 
|  | 2368 | sk_for_each(sk, &iucv_sk_list.head) { | 
|  | 2369 | iucv = iucv_sk(sk); | 
|  | 2370 | if ((iucv->hs_dev == event_dev) && | 
|  | 2371 | (sk->sk_state == IUCV_CONNECTED)) { | 
|  | 2372 | if (event == NETDEV_GOING_DOWN) | 
|  | 2373 | iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN); | 
|  | 2374 | sk->sk_state = IUCV_DISCONN; | 
|  | 2375 | sk->sk_state_change(sk); | 
|  | 2376 | } | 
|  | 2377 | } | 
|  | 2378 | break; | 
|  | 2379 | case NETDEV_DOWN: | 
|  | 2380 | case NETDEV_UNREGISTER: | 
|  | 2381 | default: | 
|  | 2382 | break; | 
|  | 2383 | } | 
|  | 2384 | return NOTIFY_DONE; | 
|  | 2385 | } | 
|  | 2386 |  | 
|  | 2387 | static struct notifier_block afiucv_netdev_notifier = { | 
|  | 2388 | .notifier_call = afiucv_netdev_event, | 
|  | 2389 | }; | 
|  | 2390 |  | 
|  | 2391 | static const struct proto_ops iucv_sock_ops = { | 
|  | 2392 | .family		= PF_IUCV, | 
|  | 2393 | .owner		= THIS_MODULE, | 
|  | 2394 | .release	= iucv_sock_release, | 
|  | 2395 | .bind		= iucv_sock_bind, | 
|  | 2396 | .connect	= iucv_sock_connect, | 
|  | 2397 | .listen		= iucv_sock_listen, | 
|  | 2398 | .accept		= iucv_sock_accept, | 
|  | 2399 | .getname	= iucv_sock_getname, | 
|  | 2400 | .sendmsg	= iucv_sock_sendmsg, | 
|  | 2401 | .recvmsg	= iucv_sock_recvmsg, | 
|  | 2402 | .poll		= iucv_sock_poll, | 
|  | 2403 | .ioctl		= sock_no_ioctl, | 
|  | 2404 | .mmap		= sock_no_mmap, | 
|  | 2405 | .socketpair	= sock_no_socketpair, | 
|  | 2406 | .shutdown	= iucv_sock_shutdown, | 
|  | 2407 | .setsockopt	= iucv_sock_setsockopt, | 
|  | 2408 | .getsockopt	= iucv_sock_getsockopt, | 
|  | 2409 | }; | 
|  | 2410 |  | 
|  | 2411 | static const struct net_proto_family iucv_sock_family_ops = { | 
|  | 2412 | .family	= AF_IUCV, | 
|  | 2413 | .owner	= THIS_MODULE, | 
|  | 2414 | .create	= iucv_sock_create, | 
|  | 2415 | }; | 
|  | 2416 |  | 
|  | 2417 | static struct packet_type iucv_packet_type = { | 
|  | 2418 | .type = cpu_to_be16(ETH_P_AF_IUCV), | 
|  | 2419 | .func = afiucv_hs_rcv, | 
|  | 2420 | }; | 
|  | 2421 |  | 
|  | 2422 | static int afiucv_iucv_init(void) | 
|  | 2423 | { | 
|  | 2424 | int err; | 
|  | 2425 |  | 
|  | 2426 | err = pr_iucv->iucv_register(&af_iucv_handler, 0); | 
|  | 2427 | if (err) | 
|  | 2428 | goto out; | 
|  | 2429 | /* establish dummy device */ | 
|  | 2430 | af_iucv_driver.bus = pr_iucv->bus; | 
|  | 2431 | err = driver_register(&af_iucv_driver); | 
|  | 2432 | if (err) | 
|  | 2433 | goto out_iucv; | 
|  | 2434 | af_iucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL); | 
|  | 2435 | if (!af_iucv_dev) { | 
|  | 2436 | err = -ENOMEM; | 
|  | 2437 | goto out_driver; | 
|  | 2438 | } | 
|  | 2439 | dev_set_name(af_iucv_dev, "af_iucv"); | 
|  | 2440 | af_iucv_dev->bus = pr_iucv->bus; | 
|  | 2441 | af_iucv_dev->parent = pr_iucv->root; | 
|  | 2442 | af_iucv_dev->release = (void (*)(struct device *))kfree; | 
|  | 2443 | af_iucv_dev->driver = &af_iucv_driver; | 
|  | 2444 | err = device_register(af_iucv_dev); | 
|  | 2445 | if (err) | 
|  | 2446 | goto out_iucv_dev; | 
|  | 2447 | return 0; | 
|  | 2448 |  | 
|  | 2449 | out_iucv_dev: | 
|  | 2450 | put_device(af_iucv_dev); | 
|  | 2451 | out_driver: | 
|  | 2452 | driver_unregister(&af_iucv_driver); | 
|  | 2453 | out_iucv: | 
|  | 2454 | pr_iucv->iucv_unregister(&af_iucv_handler, 0); | 
|  | 2455 | out: | 
|  | 2456 | return err; | 
|  | 2457 | } | 
|  | 2458 |  | 
|  | 2459 | static int __init afiucv_init(void) | 
|  | 2460 | { | 
|  | 2461 | int err; | 
|  | 2462 |  | 
|  | 2463 | if (MACHINE_IS_VM) { | 
|  | 2464 | cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err); | 
|  | 2465 | if (unlikely(err)) { | 
|  | 2466 | WARN_ON(err); | 
|  | 2467 | err = -EPROTONOSUPPORT; | 
|  | 2468 | goto out; | 
|  | 2469 | } | 
|  | 2470 |  | 
|  | 2471 | pr_iucv = try_then_request_module(symbol_get(iucv_if), "iucv"); | 
|  | 2472 | if (!pr_iucv) { | 
|  | 2473 | printk(KERN_WARNING "iucv_if lookup failed\n"); | 
|  | 2474 | memset(&iucv_userid, 0, sizeof(iucv_userid)); | 
|  | 2475 | } | 
|  | 2476 | } else { | 
|  | 2477 | memset(&iucv_userid, 0, sizeof(iucv_userid)); | 
|  | 2478 | pr_iucv = NULL; | 
|  | 2479 | } | 
|  | 2480 |  | 
|  | 2481 | err = proto_register(&iucv_proto, 0); | 
|  | 2482 | if (err) | 
|  | 2483 | goto out; | 
|  | 2484 | err = sock_register(&iucv_sock_family_ops); | 
|  | 2485 | if (err) | 
|  | 2486 | goto out_proto; | 
|  | 2487 |  | 
|  | 2488 | if (pr_iucv) { | 
|  | 2489 | err = afiucv_iucv_init(); | 
|  | 2490 | if (err) | 
|  | 2491 | goto out_sock; | 
|  | 2492 | } else | 
|  | 2493 | register_netdevice_notifier(&afiucv_netdev_notifier); | 
|  | 2494 | dev_add_pack(&iucv_packet_type); | 
|  | 2495 | return 0; | 
|  | 2496 |  | 
|  | 2497 | out_sock: | 
|  | 2498 | sock_unregister(PF_IUCV); | 
|  | 2499 | out_proto: | 
|  | 2500 | proto_unregister(&iucv_proto); | 
|  | 2501 | out: | 
|  | 2502 | if (pr_iucv) | 
|  | 2503 | symbol_put(iucv_if); | 
|  | 2504 | return err; | 
|  | 2505 | } | 
|  | 2506 |  | 
|  | 2507 | static void __exit afiucv_exit(void) | 
|  | 2508 | { | 
|  | 2509 | if (pr_iucv) { | 
|  | 2510 | device_unregister(af_iucv_dev); | 
|  | 2511 | driver_unregister(&af_iucv_driver); | 
|  | 2512 | pr_iucv->iucv_unregister(&af_iucv_handler, 0); | 
|  | 2513 | symbol_put(iucv_if); | 
|  | 2514 | } else | 
|  | 2515 | unregister_netdevice_notifier(&afiucv_netdev_notifier); | 
|  | 2516 | dev_remove_pack(&iucv_packet_type); | 
|  | 2517 | sock_unregister(PF_IUCV); | 
|  | 2518 | proto_unregister(&iucv_proto); | 
|  | 2519 | } | 
|  | 2520 |  | 
|  | 2521 | module_init(afiucv_init); | 
|  | 2522 | module_exit(afiucv_exit); | 
|  | 2523 |  | 
|  | 2524 | MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>"); | 
|  | 2525 | MODULE_DESCRIPTION("IUCV Sockets ver " VERSION); | 
|  | 2526 | MODULE_VERSION(VERSION); | 
|  | 2527 | MODULE_LICENSE("GPL"); | 
|  | 2528 | MODULE_ALIAS_NETPROTO(PF_IUCV); |