/*
 * Copyright (C) 2004 Hollis Blanchard <hollisb@us.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

/* Host Virtual Serial Interface (HVSI) is a protocol between the hosted OS
 * and the service processor on IBM pSeries servers. On these servers, there
 * are no serial ports under the OS's control, and sometimes there is no other
 * console available either. However, the service processor has two standard
 * serial ports, so this over-complicated protocol allows the OS to control
 * those ports by proxy.
 *
 * Besides data, the protocol supports the reading/writing of the serial
 * port's DTR line, and the reading of the CD line. This is to allow the OS to
 * control a modem attached to the service processor's serial port. Note that
 * the OS cannot change the speed of the port through this protocol.
 */

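/*
 * A rough sketch of the packet framing used below (the authoritative
 * definitions live in asm/hvsi.h): every packet begins with a
 * struct hvsi_header carrying a type, a total length (header included)
 * and a big-endian sequence number. Data packets append raw bytes;
 * control and query packets append a 16-bit verb and, for some verbs,
 * a 32-bit word/mask describing modem-control state.
 */
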
#undef DEBUG

#include <linux/console.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/major.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/sysrq.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <asm/hvcall.h>
#include <asm/hvconsole.h>
#include <asm/prom.h>
#include <linux/uaccess.h>
#include <asm/vio.h>
#include <asm/param.h>
#include <asm/hvsi.h>

#define HVSI_MAJOR	229
#define HVSI_MINOR	128
#define MAX_NR_HVSI_CONSOLES 4

#define HVSI_TIMEOUT (5*HZ)
#define HVSI_VERSION 1
#define HVSI_MAX_PACKET 256
#define HVSI_MAX_READ 16
#define HVSI_MAX_OUTGOING_DATA 12
#define N_OUTBUF 12

/*
 * we pass data via two 8-byte registers, so we would like our char arrays
 * properly aligned for those loads.
 */
#define __ALIGNED__	__attribute__((__aligned__(sizeof(long))))

struct hvsi_struct {
	struct tty_port port;
	struct delayed_work writer;
	struct work_struct handshaker;
	wait_queue_head_t emptyq; /* woken when outbuf is emptied */
	wait_queue_head_t stateq; /* woken when HVSI state changes */
	spinlock_t lock;
	int index;
	uint8_t throttle_buf[128];
	uint8_t outbuf[N_OUTBUF]; /* to implement write_room and chars_in_buffer */
	/* inbuf is for packet reassembly. leave a little room for leftovers. */
	uint8_t inbuf[HVSI_MAX_PACKET + HVSI_MAX_READ];
	uint8_t *inbuf_end;
	int n_throttle;
	int n_outbuf;
	uint32_t vtermno;
	uint32_t virq;
	atomic_t seqno; /* HVSI packet sequence number */
	uint16_t mctrl;
	uint8_t state;  /* HVSI protocol state */
	uint8_t flags;
#ifdef CONFIG_MAGIC_SYSRQ
	uint8_t sysrq;
#endif /* CONFIG_MAGIC_SYSRQ */
};
static struct hvsi_struct hvsi_ports[MAX_NR_HVSI_CONSOLES];

static struct tty_driver *hvsi_driver;
static int hvsi_count;
static int (*hvsi_wait)(struct hvsi_struct *hp, int state);

enum HVSI_PROTOCOL_STATE {
	HVSI_CLOSED,
	HVSI_WAIT_FOR_VER_RESPONSE,
	HVSI_WAIT_FOR_VER_QUERY,
	HVSI_OPEN,
	HVSI_WAIT_FOR_MCTRL_RESPONSE,
	HVSI_FSP_DIED,
};
#define HVSI_CONSOLE 0x1

static inline int is_console(struct hvsi_struct *hp)
{
	return hp->flags & HVSI_CONSOLE;
}

static inline int is_open(struct hvsi_struct *hp)
{
	/* if we're waiting for an mctrl then we're already open */
	return (hp->state == HVSI_OPEN)
			|| (hp->state == HVSI_WAIT_FOR_MCTRL_RESPONSE);
}

static inline void print_state(struct hvsi_struct *hp)
{
#ifdef DEBUG
	static const char *state_names[] = {
		"HVSI_CLOSED",
		"HVSI_WAIT_FOR_VER_RESPONSE",
		"HVSI_WAIT_FOR_VER_QUERY",
		"HVSI_OPEN",
		"HVSI_WAIT_FOR_MCTRL_RESPONSE",
		"HVSI_FSP_DIED",
	};
	const char *name = (hp->state < ARRAY_SIZE(state_names))
		? state_names[hp->state] : "UNKNOWN";

	pr_debug("hvsi%i: state = %s\n", hp->index, name);
#endif /* DEBUG */
}

static inline void __set_state(struct hvsi_struct *hp, int state)
{
	hp->state = state;
	print_state(hp);
	wake_up_all(&hp->stateq);
}

static inline void set_state(struct hvsi_struct *hp, int state)
{
	unsigned long flags;

	spin_lock_irqsave(&hp->lock, flags);
	__set_state(hp, state);
	spin_unlock_irqrestore(&hp->lock, flags);
}

static inline int len_packet(const uint8_t *packet)
{
	return (int)((struct hvsi_header *)packet)->len;
}

static inline int is_header(const uint8_t *packet)
{
	struct hvsi_header *header = (struct hvsi_header *)packet;
	return header->type >= VS_QUERY_RESPONSE_PACKET_HEADER;
}

static inline int got_packet(const struct hvsi_struct *hp, uint8_t *packet)
{
	if (hp->inbuf_end < packet + sizeof(struct hvsi_header))
		return 0; /* don't even have the packet header */

	if (hp->inbuf_end < (packet + len_packet(packet)))
		return 0; /* don't have the rest of the packet */

	return 1;
}

/* shift remaining bytes in packetbuf down */
static void compact_inbuf(struct hvsi_struct *hp, uint8_t *read_to)
{
	int remaining = (int)(hp->inbuf_end - read_to);

	pr_debug("%s: %i chars remain\n", __func__, remaining);

	if (read_to != hp->inbuf)
		memmove(hp->inbuf, read_to, remaining);

	hp->inbuf_end = hp->inbuf + remaining;
}

#ifdef DEBUG
#define dbg_dump_packet(packet) dump_packet(packet)
#define dbg_dump_hex(data, len) dump_hex(data, len)
#else
#define dbg_dump_packet(packet) do { } while (0)
#define dbg_dump_hex(data, len) do { } while (0)
#endif

static void dump_hex(const uint8_t *data, int len)
{
	int i;

	printk("    ");
	for (i=0; i < len; i++)
		printk("%.2x", data[i]);

	printk("\n    ");
	for (i=0; i < len; i++) {
		if (isprint(data[i]))
			printk("%c", data[i]);
		else
			printk(".");
	}
	printk("\n");
}

static void dump_packet(uint8_t *packet)
{
	struct hvsi_header *header = (struct hvsi_header *)packet;

	printk("type 0x%x, len %i, seqno %i:\n", header->type, header->len,
			header->seqno);

	dump_hex(packet, header->len);
}

static int hvsi_read(struct hvsi_struct *hp, char *buf, int count)
{
	unsigned long got;

	got = hvc_get_chars(hp->vtermno, buf, count);

	return got;
}

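/*
 * Handle a control packet from the FSP: modem-control updates (hang up the
 * tty if CD drops) and close-protocol notifications, which ask for a
 * re-handshake via *to_handshake.
 */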
static void hvsi_recv_control(struct hvsi_struct *hp, uint8_t *packet,
	struct tty_struct *tty, struct hvsi_struct **to_handshake)
{
	struct hvsi_control *header = (struct hvsi_control *)packet;

	switch (be16_to_cpu(header->verb)) {
	case VSV_MODEM_CTL_UPDATE:
		if ((be32_to_cpu(header->word) & HVSI_TSCD) == 0) {
			/* CD went away; no more connection */
			pr_debug("hvsi%i: CD dropped\n", hp->index);
			hp->mctrl &= ~TIOCM_CD;
			if (tty && !C_CLOCAL(tty))
				tty_hangup(tty);
		}
		break;
	case VSV_CLOSE_PROTOCOL:
		pr_debug("hvsi%i: service processor came back\n", hp->index);
		if (hp->state != HVSI_CLOSED) {
			*to_handshake = hp;
		}
		break;
	default:
		printk(KERN_WARNING "hvsi%i: unknown HVSI control packet: ",
				hp->index);
		dump_packet(packet);
		break;
	}
}

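/*
 * Handle a query-response packet: a version response advances the handshake,
 * and a modem-control response fills in hp->mctrl before the port is
 * considered open.
 */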
static void hvsi_recv_response(struct hvsi_struct *hp, uint8_t *packet)
{
	struct hvsi_query_response *resp = (struct hvsi_query_response *)packet;
	uint32_t mctrl_word;

	switch (hp->state) {
	case HVSI_WAIT_FOR_VER_RESPONSE:
		__set_state(hp, HVSI_WAIT_FOR_VER_QUERY);
		break;
	case HVSI_WAIT_FOR_MCTRL_RESPONSE:
		hp->mctrl = 0;
		mctrl_word = be32_to_cpu(resp->u.mctrl_word);
		if (mctrl_word & HVSI_TSDTR)
			hp->mctrl |= TIOCM_DTR;
		if (mctrl_word & HVSI_TSCD)
			hp->mctrl |= TIOCM_CD;
		__set_state(hp, HVSI_OPEN);
		break;
	default:
		printk(KERN_ERR "hvsi%i: unexpected query response: ", hp->index);
		dump_packet(packet);
		break;
	}
}

/* respond to service processor's version query */
static int hvsi_version_respond(struct hvsi_struct *hp, uint16_t query_seqno)
{
	struct hvsi_query_response packet __ALIGNED__;
	int wrote;

	packet.hdr.type = VS_QUERY_RESPONSE_PACKET_HEADER;
	packet.hdr.len = sizeof(struct hvsi_query_response);
	packet.hdr.seqno = cpu_to_be16(atomic_inc_return(&hp->seqno));
	packet.verb = cpu_to_be16(VSV_SEND_VERSION_NUMBER);
	packet.u.version = HVSI_VERSION;
	packet.query_seqno = cpu_to_be16(query_seqno+1);

	pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
	dbg_dump_hex((uint8_t*)&packet, packet.hdr.len);

	wrote = hvc_put_chars(hp->vtermno, (char *)&packet, packet.hdr.len);
	if (wrote != packet.hdr.len) {
		printk(KERN_ERR "hvsi%i: couldn't send query response!\n",
			hp->index);
		return -EIO;
	}

	return 0;
}

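/*
 * The FSP queries our version as part of the handshake; answer it and treat
 * the connection as open.
 */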
static void hvsi_recv_query(struct hvsi_struct *hp, uint8_t *packet)
{
	struct hvsi_query *query = (struct hvsi_query *)packet;

	switch (hp->state) {
	case HVSI_WAIT_FOR_VER_QUERY:
		hvsi_version_respond(hp, be16_to_cpu(query->hdr.seqno));
		__set_state(hp, HVSI_OPEN);
		break;
	default:
		printk(KERN_ERR "hvsi%i: unexpected query: ", hp->index);
		dump_packet(packet);
		break;
	}
}

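/*
 * Push received characters into the tty flip buffer. With CONFIG_MAGIC_SYSRQ,
 * a NUL byte marks the following character as a sysrq command.
 */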
static void hvsi_insert_chars(struct hvsi_struct *hp, const char *buf, int len)
{
	int i;

	for (i=0; i < len; i++) {
		char c = buf[i];
#ifdef CONFIG_MAGIC_SYSRQ
		if (c == '\0') {
			hp->sysrq = 1;
			continue;
		} else if (hp->sysrq) {
			handle_sysrq(c);
			hp->sysrq = 0;
			continue;
		}
#endif /* CONFIG_MAGIC_SYSRQ */
		tty_insert_flip_char(&hp->port, c, 0);
	}
}

/*
 * We could get 252 bytes of data at once here. But the tty layer only
 * throttles us at TTY_THRESHOLD_THROTTLE (128) bytes, so we could overflow
 * it. Accordingly we won't send more than 128 bytes at a time to the flip
 * buffer, which will give the tty buffer a chance to throttle us. Should the
 * value of TTY_THRESHOLD_THROTTLE change in n_tty.c, this code should be
 * revisited.
 */
#define TTY_THRESHOLD_THROTTLE 128
static bool hvsi_recv_data(struct hvsi_struct *hp, const uint8_t *packet)
{
	const struct hvsi_header *header = (const struct hvsi_header *)packet;
	const uint8_t *data = packet + sizeof(struct hvsi_header);
	int datalen = header->len - sizeof(struct hvsi_header);
	int overflow = datalen - TTY_THRESHOLD_THROTTLE;

	pr_debug("queueing %i chars '%.*s'\n", datalen, datalen, data);

	if (datalen == 0)
		return false;

	if (overflow > 0) {
		pr_debug("%s: got >TTY_THRESHOLD_THROTTLE bytes\n", __func__);
		datalen = TTY_THRESHOLD_THROTTLE;
	}

	hvsi_insert_chars(hp, data, datalen);

	if (overflow > 0) {
		/*
		 * we still have more data to deliver, so we need to save off the
		 * overflow and send it later
		 */
		pr_debug("%s: deferring overflow\n", __func__);
		memcpy(hp->throttle_buf, data + TTY_THRESHOLD_THROTTLE, overflow);
		hp->n_throttle = overflow;
	}

	return true;
}

/*
 * Returns true/false indicating data successfully read from hypervisor.
 * Used both to get packets for tty connections and to advance the state
 * machine during console handshaking (in which case tty = NULL and we ignore
 * incoming data).
 */
static int hvsi_load_chunk(struct hvsi_struct *hp, struct tty_struct *tty,
		struct hvsi_struct **handshake)
{
	uint8_t *packet = hp->inbuf;
	int chunklen;
	bool flip = false;

	*handshake = NULL;

	chunklen = hvsi_read(hp, hp->inbuf_end, HVSI_MAX_READ);
	if (chunklen == 0) {
		pr_debug("%s: 0-length read\n", __func__);
		return 0;
	}

	pr_debug("%s: got %i bytes\n", __func__, chunklen);
	dbg_dump_hex(hp->inbuf_end, chunklen);

	hp->inbuf_end += chunklen;

	/* handle all completed packets */
	while ((packet < hp->inbuf_end) && got_packet(hp, packet)) {
		struct hvsi_header *header = (struct hvsi_header *)packet;

		if (!is_header(packet)) {
			printk(KERN_ERR "hvsi%i: got malformed packet\n", hp->index);
			/* skip bytes until we find a header or run out of data */
			while ((packet < hp->inbuf_end) && (!is_header(packet)))
				packet++;
			continue;
		}

		pr_debug("%s: handling %i-byte packet\n", __func__,
				len_packet(packet));
		dbg_dump_packet(packet);

		switch (header->type) {
		case VS_DATA_PACKET_HEADER:
			if (!is_open(hp))
				break;
			flip = hvsi_recv_data(hp, packet);
			break;
		case VS_CONTROL_PACKET_HEADER:
			hvsi_recv_control(hp, packet, tty, handshake);
			break;
		case VS_QUERY_RESPONSE_PACKET_HEADER:
			hvsi_recv_response(hp, packet);
			break;
		case VS_QUERY_PACKET_HEADER:
			hvsi_recv_query(hp, packet);
			break;
		default:
			printk(KERN_ERR "hvsi%i: unknown HVSI packet type 0x%x\n",
				hp->index, header->type);
			dump_packet(packet);
			break;
		}

		packet += len_packet(packet);

		if (*handshake) {
			pr_debug("%s: handshake\n", __func__);
			break;
		}
	}

	compact_inbuf(hp, packet);

	if (flip)
		tty_flip_buffer_push(&hp->port);

	return 1;
}

static void hvsi_send_overflow(struct hvsi_struct *hp)
{
	pr_debug("%s: delivering %i bytes overflow\n", __func__,
			hp->n_throttle);

	hvsi_insert_chars(hp, hp->throttle_buf, hp->n_throttle);
	hp->n_throttle = 0;
}

/*
 * must get all pending data because we only get an irq on empty->non-empty
 * transition
 */
static irqreturn_t hvsi_interrupt(int irq, void *arg)
{
	struct hvsi_struct *hp = (struct hvsi_struct *)arg;
	struct hvsi_struct *handshake;
	struct tty_struct *tty;
	unsigned long flags;
	int again = 1;

	pr_debug("%s\n", __func__);

	tty = tty_port_tty_get(&hp->port);

	while (again) {
		spin_lock_irqsave(&hp->lock, flags);
		again = hvsi_load_chunk(hp, tty, &handshake);
		spin_unlock_irqrestore(&hp->lock, flags);

		if (handshake) {
			pr_debug("hvsi%i: attempting re-handshake\n", handshake->index);
			schedule_work(&handshake->handshaker);
		}
	}

	spin_lock_irqsave(&hp->lock, flags);
	if (tty && hp->n_throttle && !tty_throttled(tty)) {
		/* we weren't hung up and we weren't throttled, so we can
		 * deliver the rest now */
		hvsi_send_overflow(hp);
		tty_flip_buffer_push(&hp->port);
	}
	spin_unlock_irqrestore(&hp->lock, flags);

	tty_kref_put(tty);

	return IRQ_HANDLED;
}

/* for boot console, before the irq handler is running */
static int __init poll_for_state(struct hvsi_struct *hp, int state)
{
	unsigned long end_jiffies = jiffies + HVSI_TIMEOUT;

	for (;;) {
		hvsi_interrupt(hp->virq, (void *)hp); /* get pending data */

		if (hp->state == state)
			return 0;

		mdelay(5);
		if (time_after(jiffies, end_jiffies))
			return -EIO;
	}
}

/* wait for irq handler to change our state */
static int wait_for_state(struct hvsi_struct *hp, int state)
{
	int ret = 0;

	if (!wait_event_timeout(hp->stateq, (hp->state == state), HVSI_TIMEOUT))
		ret = -EIO;

	return ret;
}

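/*
 * Send a query packet with the given verb. The FSP's answer arrives later as
 * a query-response packet handled by hvsi_recv_response().
 */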
static int hvsi_query(struct hvsi_struct *hp, uint16_t verb)
{
	struct hvsi_query packet __ALIGNED__;
	int wrote;

	packet.hdr.type = VS_QUERY_PACKET_HEADER;
	packet.hdr.len = sizeof(struct hvsi_query);
	packet.hdr.seqno = cpu_to_be16(atomic_inc_return(&hp->seqno));
	packet.verb = cpu_to_be16(verb);

	pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
	dbg_dump_hex((uint8_t*)&packet, packet.hdr.len);

	wrote = hvc_put_chars(hp->vtermno, (char *)&packet, packet.hdr.len);
	if (wrote != packet.hdr.len) {
		printk(KERN_ERR "hvsi%i: couldn't send query (%i)!\n", hp->index,
			wrote);
		return -EIO;
	}

	return 0;
}

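/*
 * Ask the FSP for the current modem-control status and wait (by polling or
 * via the irq handler, depending on hvsi_wait) until the response has
 * updated hp->mctrl.
 */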
static int hvsi_get_mctrl(struct hvsi_struct *hp)
{
	int ret;

	set_state(hp, HVSI_WAIT_FOR_MCTRL_RESPONSE);
	hvsi_query(hp, VSV_SEND_MODEM_CTL_STATUS);

	ret = hvsi_wait(hp, HVSI_OPEN);
	if (ret < 0) {
		printk(KERN_ERR "hvsi%i: didn't get modem flags\n", hp->index);
		set_state(hp, HVSI_OPEN);
		return ret;
	}

	pr_debug("%s: mctrl 0x%x\n", __func__, hp->mctrl);

	return 0;
}

/* note that we can only set DTR */
static int hvsi_set_mctrl(struct hvsi_struct *hp, uint16_t mctrl)
{
	struct hvsi_control packet __ALIGNED__;
	int wrote;

	packet.hdr.type = VS_CONTROL_PACKET_HEADER;
	packet.hdr.seqno = cpu_to_be16(atomic_inc_return(&hp->seqno));
	packet.hdr.len = sizeof(struct hvsi_control);
	packet.verb = cpu_to_be16(VSV_SET_MODEM_CTL);
	packet.mask = cpu_to_be32(HVSI_TSDTR);

	if (mctrl & TIOCM_DTR)
		packet.word = cpu_to_be32(HVSI_TSDTR);

	pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
	dbg_dump_hex((uint8_t*)&packet, packet.hdr.len);

	wrote = hvc_put_chars(hp->vtermno, (char *)&packet, packet.hdr.len);
	if (wrote != packet.hdr.len) {
		printk(KERN_ERR "hvsi%i: couldn't set DTR!\n", hp->index);
		return -EIO;
	}

	return 0;
}

static void hvsi_drain_input(struct hvsi_struct *hp)
{
	uint8_t buf[HVSI_MAX_READ] __ALIGNED__;
	unsigned long end_jiffies = jiffies + HVSI_TIMEOUT;

	while (time_before(jiffies, end_jiffies))
		if (0 == hvsi_read(hp, buf, HVSI_MAX_READ))
			break;
}

static int hvsi_handshake(struct hvsi_struct *hp)
{
	int ret;

	/*
	 * We could have a CLOSE or other data waiting for us before we even try
	 * to open; try to throw it all away so we don't get confused. (CLOSE
	 * is the first message sent up the pipe when the FSP comes online. We
	 * need to distinguish between "it came up a while ago and we're the first
	 * user" and "it was just reset before it saw our handshake packet".)
	 */
	hvsi_drain_input(hp);

	set_state(hp, HVSI_WAIT_FOR_VER_RESPONSE);
	ret = hvsi_query(hp, VSV_SEND_VERSION_NUMBER);
	if (ret < 0) {
		printk(KERN_ERR "hvsi%i: couldn't send version query\n", hp->index);
		return ret;
	}

	ret = hvsi_wait(hp, HVSI_OPEN);
	if (ret < 0)
		return ret;

	return 0;
}

static void hvsi_handshaker(struct work_struct *work)
{
	struct hvsi_struct *hp =
		container_of(work, struct hvsi_struct, handshaker);

	if (hvsi_handshake(hp) >= 0)
		return;

	printk(KERN_ERR "hvsi%i: re-handshaking failed\n", hp->index);
	if (is_console(hp)) {
		/*
		 * ttys will re-attempt the handshake via hvsi_open, but
		 * the console will not.
		 */
		printk(KERN_ERR "hvsi%i: lost console!\n", hp->index);
	}
}

static int hvsi_put_chars(struct hvsi_struct *hp, const char *buf, int count)
{
	struct hvsi_data packet __ALIGNED__;
	int ret;

	BUG_ON(count > HVSI_MAX_OUTGOING_DATA);

	packet.hdr.type = VS_DATA_PACKET_HEADER;
	packet.hdr.seqno = cpu_to_be16(atomic_inc_return(&hp->seqno));
	packet.hdr.len = count + sizeof(struct hvsi_header);
	memcpy(&packet.data, buf, count);

	ret = hvc_put_chars(hp->vtermno, (char *)&packet, packet.hdr.len);
	if (ret == packet.hdr.len) {
		/* return the number of chars written, not the packet length */
		return count;
	}
	return ret; /* return any errors */
}

static void hvsi_close_protocol(struct hvsi_struct *hp)
{
	struct hvsi_control packet __ALIGNED__;

	packet.hdr.type = VS_CONTROL_PACKET_HEADER;
	packet.hdr.seqno = cpu_to_be16(atomic_inc_return(&hp->seqno));
	packet.hdr.len = 6;
	packet.verb = cpu_to_be16(VSV_CLOSE_PROTOCOL);

	pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
	dbg_dump_hex((uint8_t*)&packet, packet.hdr.len);

	hvc_put_chars(hp->vtermno, (char *)&packet, packet.hdr.len);
}

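/*
 * tty open(): enable interrupts from the FSP and, for non-console ports,
 * run the handshake, fetch the modem-control state and raise DTR.
 */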
static int hvsi_open(struct tty_struct *tty, struct file *filp)
{
	struct hvsi_struct *hp;
	unsigned long flags;
	int ret;

	pr_debug("%s\n", __func__);

	hp = &hvsi_ports[tty->index];

	tty->driver_data = hp;

	mb();
	if (hp->state == HVSI_FSP_DIED)
		return -EIO;

	tty_port_tty_set(&hp->port, tty);
	spin_lock_irqsave(&hp->lock, flags);
	hp->port.count++;
	atomic_set(&hp->seqno, 0);
	h_vio_signal(hp->vtermno, VIO_IRQ_ENABLE);
	spin_unlock_irqrestore(&hp->lock, flags);

	if (is_console(hp))
		return 0; /* this has already been handshaked as the console */

	ret = hvsi_handshake(hp);
	if (ret < 0) {
		printk(KERN_ERR "%s: HVSI handshaking failed\n", tty->name);
		return ret;
	}

	ret = hvsi_get_mctrl(hp);
	if (ret < 0) {
		printk(KERN_ERR "%s: couldn't get initial modem flags\n", tty->name);
		return ret;
	}

	ret = hvsi_set_mctrl(hp, hp->mctrl | TIOCM_DTR);
	if (ret < 0) {
		printk(KERN_ERR "%s: couldn't set DTR\n", tty->name);
		return ret;
	}

	return 0;
}

/* wait for hvsi_write_worker to empty hp->outbuf */
static void hvsi_flush_output(struct hvsi_struct *hp)
{
	wait_event_timeout(hp->emptyq, (hp->n_outbuf <= 0), HVSI_TIMEOUT);

	/* 'writer' could still be pending if it didn't see n_outbuf = 0 yet */
	cancel_delayed_work_sync(&hp->writer);
	flush_work(&hp->handshaker);

	/*
	 * it's also possible that our timeout expired and hvsi_write_worker
	 * didn't manage to push outbuf. poof.
	 */
	hp->n_outbuf = 0;
}

static void hvsi_close(struct tty_struct *tty, struct file *filp)
{
	struct hvsi_struct *hp = tty->driver_data;
	unsigned long flags;

	pr_debug("%s\n", __func__);

	if (tty_hung_up_p(filp))
		return;

	spin_lock_irqsave(&hp->lock, flags);

	if (--hp->port.count == 0) {
		tty_port_tty_set(&hp->port, NULL);
		hp->inbuf_end = hp->inbuf; /* discard remaining partial packets */

		/* only close down connection if it is not the console */
		if (!is_console(hp)) {
			h_vio_signal(hp->vtermno, VIO_IRQ_DISABLE); /* no more irqs */
			__set_state(hp, HVSI_CLOSED);
			/*
			 * any data delivered to the tty layer after this will be
			 * discarded (except for XON/XOFF)
			 */
			tty->closing = 1;

			spin_unlock_irqrestore(&hp->lock, flags);

			/* let any existing irq handlers finish. no more will start. */
			synchronize_irq(hp->virq);

			/* hvsi_write_worker will re-schedule until outbuf is empty. */
			hvsi_flush_output(hp);

			/* tell FSP to stop sending data */
			hvsi_close_protocol(hp);

			/*
			 * drain anything FSP is still in the middle of sending, and let
			 * hvsi_handshake drain the rest on the next open.
			 */
			hvsi_drain_input(hp);

			spin_lock_irqsave(&hp->lock, flags);
		}
	} else if (hp->port.count < 0)
		printk(KERN_ERR "hvsi_close %lu: oops, count is %d\n",
		       hp - hvsi_ports, hp->port.count);

	spin_unlock_irqrestore(&hp->lock, flags);
}

static void hvsi_hangup(struct tty_struct *tty)
{
	struct hvsi_struct *hp = tty->driver_data;
	unsigned long flags;

	pr_debug("%s\n", __func__);

	tty_port_tty_set(&hp->port, NULL);

	spin_lock_irqsave(&hp->lock, flags);
	hp->port.count = 0;
	hp->n_outbuf = 0;
	spin_unlock_irqrestore(&hp->lock, flags);
}

/* called with hp->lock held */
static void hvsi_push(struct hvsi_struct *hp)
{
	int n;

	if (hp->n_outbuf <= 0)
		return;

	n = hvsi_put_chars(hp, hp->outbuf, hp->n_outbuf);
	if (n > 0) {
		/* success */
		pr_debug("%s: wrote %i chars\n", __func__, n);
		hp->n_outbuf = 0;
	} else if (n == -EIO) {
		__set_state(hp, HVSI_FSP_DIED);
		printk(KERN_ERR "hvsi%i: service processor died\n", hp->index);
	}
}

/* hvsi_write_worker will keep rescheduling itself until outbuf is empty */
static void hvsi_write_worker(struct work_struct *work)
{
	struct hvsi_struct *hp =
		container_of(work, struct hvsi_struct, writer.work);
	unsigned long flags;
#ifdef DEBUG
	static long start_j = 0;

	if (start_j == 0)
		start_j = jiffies;
#endif /* DEBUG */

	spin_lock_irqsave(&hp->lock, flags);

	pr_debug("%s: %i chars in buffer\n", __func__, hp->n_outbuf);

	if (!is_open(hp)) {
		/*
		 * We could have a non-open connection if the service processor died
		 * while we were busily scheduling ourselves. In that case, it could
		 * be minutes before the service processor comes back, so only try
		 * again once a second.
		 */
		schedule_delayed_work(&hp->writer, HZ);
		goto out;
	}

	hvsi_push(hp);
	if (hp->n_outbuf > 0)
		schedule_delayed_work(&hp->writer, 10);
	else {
#ifdef DEBUG
		pr_debug("%s: outbuf emptied after %li jiffies\n", __func__,
				jiffies - start_j);
		start_j = 0;
#endif /* DEBUG */
		wake_up_all(&hp->emptyq);
		tty_port_tty_wakeup(&hp->port);
	}

out:
	spin_unlock_irqrestore(&hp->lock, flags);
}

static int hvsi_write_room(struct tty_struct *tty)
{
	struct hvsi_struct *hp = tty->driver_data;

	return N_OUTBUF - hp->n_outbuf;
}

static int hvsi_chars_in_buffer(struct tty_struct *tty)
{
	struct hvsi_struct *hp = tty->driver_data;

	return hp->n_outbuf;
}

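/*
 * tty write(): copy as much as fits into outbuf and try to push it to the
 * hypervisor right away; anything still buffered afterwards is pushed later
 * by hvsi_write_worker. Returns the number of bytes accepted.
 */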
static int hvsi_write(struct tty_struct *tty,
			const unsigned char *buf, int count)
{
	struct hvsi_struct *hp = tty->driver_data;
	const char *source = buf;
	unsigned long flags;
	int total = 0;
	int origcount = count;

	spin_lock_irqsave(&hp->lock, flags);

	pr_debug("%s: %i chars in buffer\n", __func__, hp->n_outbuf);

	if (!is_open(hp)) {
		/* we're either closing or not yet open; don't accept data */
		pr_debug("%s: not open\n", __func__);
		goto out;
	}

	/*
	 * when the hypervisor buffer (16K) fills, data will stay in hp->outbuf
	 * and hvsi_write_worker will be scheduled. subsequent hvsi_write() calls
	 * will see there is no room in outbuf and return.
	 */
	while ((count > 0) && (hvsi_write_room(tty) > 0)) {
		int chunksize = min(count, hvsi_write_room(tty));

		BUG_ON(hp->n_outbuf < 0);
		memcpy(hp->outbuf + hp->n_outbuf, source, chunksize);
		hp->n_outbuf += chunksize;

		total += chunksize;
		source += chunksize;
		count -= chunksize;
		hvsi_push(hp);
	}

	if (hp->n_outbuf > 0) {
		/*
		 * we weren't able to write it all to the hypervisor.
		 * schedule another push attempt.
		 */
		schedule_delayed_work(&hp->writer, 10);
	}

out:
	spin_unlock_irqrestore(&hp->lock, flags);

	if (total != origcount)
		pr_debug("%s: wanted %i, only wrote %i\n", __func__, origcount,
			total);

	return total;
}

/*
 * I have never seen throttle or unthrottle called, so this little throttle
 * buffering scheme may or may not work.
 */
static void hvsi_throttle(struct tty_struct *tty)
{
	struct hvsi_struct *hp = tty->driver_data;

	pr_debug("%s\n", __func__);

	h_vio_signal(hp->vtermno, VIO_IRQ_DISABLE);
}

static void hvsi_unthrottle(struct tty_struct *tty)
{
	struct hvsi_struct *hp = tty->driver_data;
	unsigned long flags;

	pr_debug("%s\n", __func__);

	spin_lock_irqsave(&hp->lock, flags);
	if (hp->n_throttle) {
		hvsi_send_overflow(hp);
		tty_flip_buffer_push(&hp->port);
	}
	spin_unlock_irqrestore(&hp->lock, flags);

	h_vio_signal(hp->vtermno, VIO_IRQ_ENABLE);
}

static int hvsi_tiocmget(struct tty_struct *tty)
{
	struct hvsi_struct *hp = tty->driver_data;

	hvsi_get_mctrl(hp);
	return hp->mctrl;
}

static int hvsi_tiocmset(struct tty_struct *tty,
				unsigned int set, unsigned int clear)
{
	struct hvsi_struct *hp = tty->driver_data;
	unsigned long flags;
	uint16_t new_mctrl;

	/* we can only alter DTR */
	clear &= TIOCM_DTR;
	set &= TIOCM_DTR;

	spin_lock_irqsave(&hp->lock, flags);

	new_mctrl = (hp->mctrl & ~clear) | set;

	if (hp->mctrl != new_mctrl) {
		hvsi_set_mctrl(hp, new_mctrl);
		hp->mctrl = new_mctrl;
	}
	spin_unlock_irqrestore(&hp->lock, flags);

	return 0;
}

static const struct tty_operations hvsi_ops = {
	.open = hvsi_open,
	.close = hvsi_close,
	.write = hvsi_write,
	.hangup = hvsi_hangup,
	.write_room = hvsi_write_room,
	.chars_in_buffer = hvsi_chars_in_buffer,
	.throttle = hvsi_throttle,
	.unthrottle = hvsi_unthrottle,
	.tiocmget = hvsi_tiocmget,
	.tiocmset = hvsi_tiocmset,
};

static int __init hvsi_init(void)
{
	int i;

	hvsi_driver = alloc_tty_driver(hvsi_count);
	if (!hvsi_driver)
		return -ENOMEM;

	hvsi_driver->driver_name = "hvsi";
	hvsi_driver->name = "hvsi";
	hvsi_driver->major = HVSI_MAJOR;
	hvsi_driver->minor_start = HVSI_MINOR;
	hvsi_driver->type = TTY_DRIVER_TYPE_SYSTEM;
	hvsi_driver->init_termios = tty_std_termios;
	hvsi_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL;
	hvsi_driver->init_termios.c_ispeed = 9600;
	hvsi_driver->init_termios.c_ospeed = 9600;
	hvsi_driver->flags = TTY_DRIVER_REAL_RAW;
	tty_set_operations(hvsi_driver, &hvsi_ops);

	for (i=0; i < hvsi_count; i++) {
		struct hvsi_struct *hp = &hvsi_ports[i];
		int ret = 1;

		tty_port_link_device(&hp->port, hvsi_driver, i);

		ret = request_irq(hp->virq, hvsi_interrupt, 0, "hvsi", hp);
		if (ret)
			printk(KERN_ERR "HVSI: couldn't reserve irq 0x%x (error %i)\n",
				hp->virq, ret);
	}
	hvsi_wait = wait_for_state; /* irqs active now */

	if (tty_register_driver(hvsi_driver))
		panic("Couldn't register hvsi console driver\n");

	printk(KERN_DEBUG "HVSI: registered %i devices\n", hvsi_count);

	return 0;
}
device_initcall(hvsi_init);

/***** console (not tty) code: *****/

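/*
 * console->write hook: writes directly via hvsi_put_chars (bypassing outbuf),
 * expanding LF to CRLF in place the same way hvc_console does.
 */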
static void hvsi_console_print(struct console *console, const char *buf,
		unsigned int count)
{
	struct hvsi_struct *hp = &hvsi_ports[console->index];
	char c[HVSI_MAX_OUTGOING_DATA] __ALIGNED__;
	unsigned int i = 0, n = 0;
	int ret, donecr = 0;

	mb();
	if (!is_open(hp))
		return;

	/*
	 * ugh, we have to translate LF -> CRLF ourselves, in place.
	 * copied from hvc_console.c:
	 */
	while (count > 0 || i > 0) {
		if (count > 0 && i < sizeof(c)) {
			if (buf[n] == '\n' && !donecr) {
				c[i++] = '\r';
				donecr = 1;
			} else {
				c[i++] = buf[n++];
				donecr = 0;
				--count;
			}
		} else {
			ret = hvsi_put_chars(hp, c, i);
			if (ret < 0)
				i = 0; /* write failed; drop the buffered chunk */
			else
				i -= ret;
		}
	}
}

static struct tty_driver *hvsi_console_device(struct console *console,
		int *index)
{
	*index = console->index;
	return hvsi_driver;
}

static int __init hvsi_console_setup(struct console *console, char *options)
{
	struct hvsi_struct *hp;
	int ret;

	if (console->index < 0 || console->index >= hvsi_count)
		return -1;
	hp = &hvsi_ports[console->index];

	/* give the FSP a chance to change the baud rate when we re-open */
	hvsi_close_protocol(hp);

	ret = hvsi_handshake(hp);
	if (ret < 0)
		return ret;

	ret = hvsi_get_mctrl(hp);
	if (ret < 0)
		return ret;

	ret = hvsi_set_mctrl(hp, hp->mctrl | TIOCM_DTR);
	if (ret < 0)
		return ret;

	hp->flags |= HVSI_CONSOLE;

	return 0;
}

static struct console hvsi_console = {
	.name		= "hvsi",
	.write		= hvsi_console_print,
	.device		= hvsi_console_device,
	.setup		= hvsi_console_setup,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,
};

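/*
 * Runs at console_initcall time, before hvsi_init(): find hvterm-protocol
 * vty nodes in the device tree, set up per-port state and irq mappings, and
 * register the console. hvsi_wait starts out as poll_for_state since no irq
 * handler is installed yet.
 */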
static int __init hvsi_console_init(void)
{
	struct device_node *vty;

	hvsi_wait = poll_for_state; /* no irqs yet; must poll */

	/* search device tree for vty nodes */
	for_each_compatible_node(vty, "serial", "hvterm-protocol") {
		struct hvsi_struct *hp;
		const __be32 *vtermno, *irq;

		vtermno = of_get_property(vty, "reg", NULL);
		irq = of_get_property(vty, "interrupts", NULL);
		if (!vtermno || !irq)
			continue;

		if (hvsi_count >= MAX_NR_HVSI_CONSOLES) {
			of_node_put(vty);
			break;
		}

		hp = &hvsi_ports[hvsi_count];
		INIT_DELAYED_WORK(&hp->writer, hvsi_write_worker);
		INIT_WORK(&hp->handshaker, hvsi_handshaker);
		init_waitqueue_head(&hp->emptyq);
		init_waitqueue_head(&hp->stateq);
		spin_lock_init(&hp->lock);
		tty_port_init(&hp->port);
		hp->index = hvsi_count;
		hp->inbuf_end = hp->inbuf;
		hp->state = HVSI_CLOSED;
		hp->vtermno = be32_to_cpup(vtermno);
		hp->virq = irq_create_mapping(NULL, be32_to_cpup(irq));
		if (hp->virq == 0) {
			printk(KERN_ERR "%s: couldn't create irq mapping for 0x%x\n",
				__func__, be32_to_cpup(irq));
			tty_port_destroy(&hp->port);
			continue;
		}

		hvsi_count++;
	}

	if (hvsi_count)
		register_console(&hvsi_console);
	return 0;
}
console_initcall(hvsi_console_init);