// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/kernel/reboot.c
 *
 * Copyright (C) 2013 Linus Torvalds
 */

#define pr_fmt(fmt)	"reboot: " fmt

#include <linux/ctype.h>
#include <linux/export.h>
#include <linux/kexec.h>
#include <linux/kmod.h>
#include <linux/kmsg_dump.h>
#include <linux/reboot.h>
#include <linux/suspend.h>
#include <linux/syscalls.h>
#include <linux/syscore_ops.h>
#include <linux/uaccess.h>

#ifdef CONFIG_PXA_RAMDUMP
#include <soc/asr/ramdump.h>
#endif

#include <soc/asr/asr_mflag.h>

/*
 * this indicates whether you can reboot with ctrl-alt-del: the default is yes
 */

int C_A_D = 1;
struct pid *cad_pid;
EXPORT_SYMBOL(cad_pid);

#if defined(CONFIG_ARM) || defined(CONFIG_UNICORE32)
#define DEFAULT_REBOOT_MODE		= REBOOT_HARD
#else
#define DEFAULT_REBOOT_MODE
#endif
enum reboot_mode reboot_mode DEFAULT_REBOOT_MODE;
EXPORT_SYMBOL_GPL(reboot_mode);
enum reboot_mode panic_reboot_mode = REBOOT_UNDEFINED;
EXPORT_SYMBOL_GPL(panic_reboot_mode);

/*
 * This variable is used privately to keep track of whether or not
 * reboot_type is still set to its default value (i.e., reboot= hasn't
 * been set on the command line). This is needed so that we can
 * suppress DMI scanning for reboot quirks. Without it, it's
 * impossible to override a faulty reboot quirk without recompiling.
 */
int reboot_default = 1;
int reboot_cpu;
enum reboot_type reboot_type = BOOT_ACPI;
int reboot_force;

/*
 * If set, this is used for preparing the system to power off.
 */

void (*pm_power_off_prepare)(void);
EXPORT_SYMBOL_GPL(pm_power_off_prepare);

/**
 * emergency_restart - reboot the system
 *
 * Without shutting down any hardware or taking any locks
 * reboot the system. This is called when we know we are in
 * trouble so this is our best effort to reboot. This is
 * safe to call in interrupt context.
 */
void emergency_restart(void)
{
	kmsg_dump(KMSG_DUMP_EMERG);
	system_state = SYSTEM_RESTART;
	machine_emergency_restart();
}
EXPORT_SYMBOL_GPL(emergency_restart);
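
/*
 * Illustrative use only: a driver that detects an unrecoverable fault in
 * its interrupt handler may fall back to emergency_restart(), since it is
 * safe in interrupt context. The foo_* names below are hypothetical:
 *
 *	static irqreturn_t foo_fatal_irq(int irq, void *dev_id)
 *	{
 *		pr_emerg("foo: unrecoverable fault, restarting\n");
 *		emergency_restart();
 *		return IRQ_HANDLED;
 *	}
 */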

void kernel_restart_prepare(char *cmd)
{
	blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
	system_state = SYSTEM_RESTART;
	usermodehelper_disable();
	device_shutdown();
#ifdef CONFIG_PXA_RAMDUMP
	ramdump_rdc_reset(); /* Clean RAMDUMP request on graceful command */
#endif
}

/**
 * register_reboot_notifier - Register function to be called at reboot time
 * @nb: Info about notifier function to be called
 *
 * Registers a function with the list of functions
 * to be called at reboot time.
 *
 * Currently always returns zero, as blocking_notifier_chain_register()
 * always returns zero.
 */
int register_reboot_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&reboot_notifier_list, nb);
}
EXPORT_SYMBOL(register_reboot_notifier);
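
/*
 * Illustrative use only: a driver that must quiesce its hardware before
 * the system goes down can hook the chain like this (the foo_* names are
 * hypothetical). The @action argument is SYS_RESTART, SYS_HALT or
 * SYS_POWER_OFF:
 *
 *	static int foo_reboot_cb(struct notifier_block *nb,
 *				 unsigned long action, void *data)
 *	{
 *		foo_quiesce();
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block foo_reboot_nb = {
 *		.notifier_call = foo_reboot_cb,
 *	};
 *
 *	register_reboot_notifier(&foo_reboot_nb);
 */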

/**
 * unregister_reboot_notifier - Unregister previously registered reboot notifier
 * @nb: Hook to be unregistered
 *
 * Unregisters a previously registered reboot
 * notifier function.
 *
 * Returns zero on success, or %-ENOENT on failure.
 */
int unregister_reboot_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&reboot_notifier_list, nb);
}
EXPORT_SYMBOL(unregister_reboot_notifier);

static void devm_unregister_reboot_notifier(struct device *dev, void *res)
{
	WARN_ON(unregister_reboot_notifier(*(struct notifier_block **)res));
}

int devm_register_reboot_notifier(struct device *dev, struct notifier_block *nb)
{
	struct notifier_block **rcnb;
	int ret;

	rcnb = devres_alloc(devm_unregister_reboot_notifier,
			    sizeof(*rcnb), GFP_KERNEL);
	if (!rcnb)
		return -ENOMEM;

	ret = register_reboot_notifier(nb);
	if (!ret) {
		*rcnb = nb;
		devres_add(dev, rcnb);
	} else {
		devres_free(rcnb);
	}

	return ret;
}
EXPORT_SYMBOL(devm_register_reboot_notifier);
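
/*
 * Illustrative use only: registering from a driver's probe routine with the
 * device-managed variant, so the notifier is dropped automatically when the
 * device is unbound (foo_* names are hypothetical):
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		...
 *		return devm_register_reboot_notifier(&pdev->dev,
 *						     &foo_reboot_nb);
 *	}
 */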

/*
 * Notifier list for kernel code which wants to be called
 * to restart the system.
 */
static ATOMIC_NOTIFIER_HEAD(restart_handler_list);

/**
 * register_restart_handler - Register function to be called to reset
 *			      the system
 * @nb: Info about handler function to be called
 * @nb->priority: Handler priority. Handlers should follow the
 *		  following guidelines for setting priorities.
 *		  0:	Restart handler of last resort,
 *			with limited restart capabilities
 *		  128:	Default restart handler; use if no other
 *			restart handler is expected to be available,
 *			and/or if restart functionality is
 *			sufficient to restart the entire system
 *		  255:	Highest priority restart handler, will
 *			preempt all other restart handlers
 *
 * Registers a function with code to be called to restart the
 * system.
 *
 * Registered functions will be called from machine_restart as last
 * step of the restart sequence (if the architecture specific
 * machine_restart function calls do_kernel_restart - see below
 * for details).
 * Registered functions are expected to restart the system immediately.
 * If more than one function is registered, the restart handler priority
 * selects which function will be called first.
 *
 * Restart handlers are expected to be registered from non-architecture
 * code, typically from drivers. A typical use case would be a system
 * where restart functionality is provided through a watchdog. Multiple
 * restart handlers may exist; for example, one restart handler might
 * restart the entire system, while another only restarts the CPU.
 * In such cases, the restart handler which only restarts part of the
 * hardware is expected to register with low priority to ensure that
 * it only runs if no other means to restart the system is available.
 *
 * Currently always returns zero, as atomic_notifier_chain_register()
 * always returns zero.
 */
int register_restart_handler(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&restart_handler_list, nb);
}
EXPORT_SYMBOL(register_restart_handler);
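
/*
 * Illustrative use only: a watchdog driver that can reset the whole system
 * registers with the default priority of 128 (foo_* names are hypothetical):
 *
 *	static int foo_wdt_restart(struct notifier_block *nb,
 *				   unsigned long mode, void *cmd)
 *	{
 *		foo_wdt_force_reset();
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block foo_wdt_restart_nb = {
 *		.notifier_call	= foo_wdt_restart,
 *		.priority	= 128,
 *	};
 *
 *	register_restart_handler(&foo_wdt_restart_nb);
 */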

/**
 * unregister_restart_handler - Unregister previously registered
 *				restart handler
 * @nb: Hook to be unregistered
 *
 * Unregisters a previously registered restart handler function.
 *
 * Returns zero on success, or %-ENOENT on failure.
 */
int unregister_restart_handler(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&restart_handler_list, nb);
}
EXPORT_SYMBOL(unregister_restart_handler);

/**
 * do_kernel_restart - Execute kernel restart handler call chain
 *
 * Calls functions registered with register_restart_handler.
 *
 * Expected to be called from machine_restart as last step of the restart
 * sequence.
 *
 * Restarts the system immediately if a restart handler function has been
 * registered. Otherwise does nothing.
 */
void do_kernel_restart(char *cmd)
{
	atomic_notifier_call_chain(&restart_handler_list, reboot_mode, cmd);
}
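
/*
 * Illustrative sketch of the expected caller: an architecture's
 * machine_restart() implementation invokes the chain above as its final
 * step. The surrounding calls are typical but architecture specific:
 *
 *	void machine_restart(char *cmd)
 *	{
 *		local_irq_disable();
 *		smp_send_stop();
 *		do_kernel_restart(cmd);
 *		// if no handler restarted the machine, fall back to a
 *		// platform-specific hard reset here
 *	}
 */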

void migrate_to_reboot_cpu(void)
{
	/* The boot cpu is always logical cpu 0 */
	int cpu = reboot_cpu;

	cpu_hotplug_disable();

	/* Make certain the cpu I'm about to reboot on is online */
	if (!cpu_online(cpu))
		cpu = cpumask_first(cpu_online_mask);

	/* Prevent races with other tasks migrating this task */
	current->flags |= PF_NO_SETAFFINITY;

	/* Make certain I only run on the appropriate processor */
	set_cpus_allowed_ptr(current, cpumask_of(cpu));
}

/**
 * kernel_restart - reboot the system
 * @cmd: pointer to buffer containing command to execute for restart
 *	 or %NULL
 *
 * Shutdown everything and perform a clean reboot.
 * This is not safe to call in interrupt context.
 */
void kernel_restart(char *cmd)
{
	kernel_restart_prepare(cmd);
	migrate_to_reboot_cpu();
	syscore_shutdown();
	if (!cmd)
		pr_emerg("Restarting system by: %s\n", current->comm);
	else
		pr_emerg("Restarting system with command '%s': %s\n", cmd, current->comm);
	kmsg_dump(KMSG_DUMP_SHUTDOWN);
	machine_restart(cmd);
}
EXPORT_SYMBOL_GPL(kernel_restart);

static void kernel_shutdown_prepare(enum system_states state)
{
	blocking_notifier_call_chain(&reboot_notifier_list,
		(state == SYSTEM_HALT) ? SYS_HALT : SYS_POWER_OFF, NULL);
	system_state = state;
	usermodehelper_disable();

#ifdef CONFIG_PXA_RAMDUMP
	ramdump_rdc_reset(); /* Clean RAMDUMP request on graceful command */
#endif
	device_shutdown();
}

/**
 * kernel_halt - halt the system
 *
 * Shutdown everything and perform a clean system halt.
 */
void kernel_halt(void)
{
	kernel_shutdown_prepare(SYSTEM_HALT);
	migrate_to_reboot_cpu();
	syscore_shutdown();
	pr_emerg("System halted\n");
	kmsg_dump(KMSG_DUMP_SHUTDOWN);
	machine_halt();
}
EXPORT_SYMBOL_GPL(kernel_halt);

/**
 * kernel_power_off - power_off the system
 *
 * Shutdown everything and perform a clean system power_off.
 */
void kernel_power_off(void)
{
	kernel_shutdown_prepare(SYSTEM_POWER_OFF);
	if (pm_power_off_prepare)
		pm_power_off_prepare();
	migrate_to_reboot_cpu();
	syscore_shutdown();
	pr_emerg("Power down by: %s\n", current->comm);
	kmsg_dump(KMSG_DUMP_SHUTDOWN);
	machine_power_off();
}
EXPORT_SYMBOL_GPL(kernel_power_off);

DEFINE_MUTEX(system_transition_mutex);

/* mbtk wyq for reboot reason */
#include <linux/mtd/mtd.h>
#include <linux/vmalloc.h>	/* for vmalloc()/vfree() used below */

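/*
 * mbtk_reboot_reason_save() - record why the system is rebooting in the
 * device-info MTD partition named @mtd_name. The first erase block is read;
 * if the stored flag is still MBTK_REBOOT_FLAG_NORMAL it is replaced with
 * @reboot_flag and the block is erased and written back, otherwise the
 * already-recorded reason is kept. Called from the reboot syscall below as:
 *
 *	mbtk_reboot_reason_save(MBTK_DEV_INFO_NAME, MBTK_REBOOT_FLAG_COMMAND);
 *
 * Returns MBTK_REBOOT_RESULT_SUCCESS or MBTK_REBOOT_RESULT_FAIL.
 */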
int mbtk_reboot_reason_save(const char *mtd_name, mbtk_device_info_reboot_flag_enum reboot_flag)
{
	int err;
	uint32 ret_err = MBTK_REBOOT_RESULT_FAIL;
	uint32 read_len = 0;
	uint32 write_len = 0;
	uint8 *data_buff = NULL;
	struct erase_info erase_info = {0};
	mbtk_device_info_header_t *header = NULL;
	mbtk_device_info_basic_t *info_basic = NULL;
	struct mtd_info *mtd = get_mtd_device_nm(mtd_name);

	if (IS_ERR(mtd)) {
		err = PTR_ERR(mtd);
		pr_emerg("error: Cannot get mtd device[%d]\n", err);
		return ret_err;
	}

	/*
	 * Debug aid (normally disabled):
	 * pr_emerg("device_info:size[0x%08llx] erase_size[0x%08x] write_size[0x%08x] oob_size[0x%08x]\n",
	 *	    mtd->size, mtd->erasesize, mtd->writesize, mtd->oobsize);
	 */
	data_buff = (uint8 *)vmalloc(mtd->erasesize);
	if (!data_buff) {
		pr_emerg("mtd vmalloc err\n");
		goto fail;
	}

	/* Read the first erase block of the device-info partition */
	if (mtd_read(mtd, (loff_t)0 * mtd->erasesize, mtd->erasesize, &read_len, data_buff)) {
		pr_emerg("mtd mtd_read err\n");
		goto fail;
	}

	if (mtd->erasesize != read_len) {
		pr_emerg("read_len[0x%x] erasesize[0x%x] inequality\n", read_len, mtd->erasesize);
		goto fail;
	}

	header = (mbtk_device_info_header_t *)data_buff;
	if (header->tag != MBTK_DEVICE_INFO_PARTITION_TAG || header->version != MBTK_DEVICE_INFO_CURR_VERSION) {
		pr_emerg("tag: 0x%08x version: %d\n", header->tag, header->version);
		goto fail;
	}

	info_basic = (mbtk_device_info_basic_t *)(data_buff + header->item_header[MBTK_DEVICE_INFO_ITEM_BASIC].addr);
	if (info_basic->reboot_flag == MBTK_REBOOT_FLAG_NORMAL)
		info_basic->reboot_flag = reboot_flag;
	else
		goto success;	/* a reason is already recorded; keep it */

	/* Erase the block and write the updated contents back */
	memset(&erase_info, 0x0, sizeof(struct erase_info));
	erase_info.addr = 0;
	erase_info.len = mtd->erasesize;

	err = mtd_erase(mtd, &erase_info);
	if (err) {
		pr_emerg("mtd_erase err[%d]\n", err);
		goto fail;
	}

	if (mtd_write(mtd, (loff_t)0 * mtd->erasesize, mtd->erasesize, &write_len, data_buff)) {
		pr_emerg("mtd mtd_write err\n");
		goto fail;
	}

	if (mtd->erasesize != write_len) {
		pr_emerg("write_len[0x%x] erasesize[0x%x] inequality\n", write_len, mtd->erasesize);
		goto fail;
	}

success:
	ret_err = MBTK_REBOOT_RESULT_SUCCESS;

fail:
	if (data_buff) {
		vfree(data_buff);
		data_buff = NULL;
	}
	put_mtd_device(mtd);
	return ret_err;
}
/* mbtk wyq for reboot reason */

/*
 * Reboot system call: for obvious reasons only root may call it,
 * and even root needs to set up some magic numbers in the registers
 * so that some mistake won't make this reboot the whole machine.
 * You can also set the meaning of the ctrl-alt-del-key here.
 *
 * reboot doesn't sync: do that yourself before calling this.
 */
SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
		void __user *, arg)
{
	struct pid_namespace *pid_ns = task_active_pid_ns(current);
	char buffer[256];
	int ret = 0;
	struct asr_mflag *asr_flag = get_asr_mflag();

	/* We only trust the superuser with rebooting the system. */
	if (!ns_capable(pid_ns->user_ns, CAP_SYS_BOOT))
		return -EPERM;

	/* For safety, we require "magic" arguments. */
	if (magic1 != LINUX_REBOOT_MAGIC1 ||
	    (magic2 != LINUX_REBOOT_MAGIC2 &&
	     magic2 != LINUX_REBOOT_MAGIC2A &&
	     magic2 != LINUX_REBOOT_MAGIC2B &&
	     magic2 != LINUX_REBOOT_MAGIC2C))
		return -EINVAL;

	/* mbtk wyq for reboot reason */
	if (cmd) {
		ret = mbtk_reboot_reason_save(MBTK_DEV_INFO_NAME, MBTK_REBOOT_FLAG_COMMAND);
		if (ret)
			pr_emerg("mbtk_reboot_reason_save err\n");
	}
	/* mbtk wyq for reboot reason */

	/*
	 * If pid namespaces are enabled and the current task is in a child
	 * pid_namespace, the command is handled by reboot_pid_ns() which will
	 * call do_exit().
	 */
	ret = reboot_pid_ns(pid_ns, cmd);
	if (ret)
		return ret;

	/* Instead of trying to make the power_off code look like
	 * halt when pm_power_off is not set do it the easy way.
	 */
	if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !pm_power_off)
		cmd = LINUX_REBOOT_CMD_HALT;

	mutex_lock(&system_transition_mutex);
	switch (cmd) {
	case LINUX_REBOOT_CMD_RESTART:
		kernel_restart(NULL);
		break;

	case LINUX_REBOOT_CMD_CAD_ON:
		C_A_D = 1;
		break;

	case LINUX_REBOOT_CMD_CAD_OFF:
		C_A_D = 0;
		break;

	case LINUX_REBOOT_CMD_HALT:
		kernel_halt();
		do_exit(0);
		panic("cannot halt");

	case LINUX_REBOOT_CMD_POWER_OFF:
		kernel_power_off();
		do_exit(0);
		break;

	case LINUX_REBOOT_CMD_RESTART2:
		ret = strncpy_from_user(&buffer[0], arg, sizeof(buffer) - 1);
		if (ret < 0) {
			ret = -EFAULT;
			break;
		}
		buffer[sizeof(buffer) - 1] = '\0';

		if (strcmp(buffer, "fastboot") == 0)
			asr_flag->fastboot_flag = 0x46415354;

		kernel_restart(buffer);
		break;

#ifdef CONFIG_KEXEC_CORE
	case LINUX_REBOOT_CMD_KEXEC:
		ret = kernel_kexec();
		break;
#endif

#ifdef CONFIG_HIBERNATION
	case LINUX_REBOOT_CMD_SW_SUSPEND:
		ret = hibernate();
		break;
#endif

	default:
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&system_transition_mutex);
	return ret;
}
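
/*
 * Illustrative use only: the glibc reboot(2) wrapper supplies the magic
 * numbers itself, but a raw invocation of this syscall with the RESTART2
 * command and an argument string (handled above, where "fastboot" is
 * treated specially) would look like:
 *
 *	syscall(SYS_reboot, LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2,
 *		LINUX_REBOOT_CMD_RESTART2, "fastboot");
 */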

static void deferred_cad(struct work_struct *dummy)
{
	kernel_restart(NULL);
}

/*
 * This function gets called by ctrl-alt-del - ie the keyboard interrupt.
 * As it's called within an interrupt, it may NOT sync: the only choice
 * is whether to reboot at once, or just ignore the ctrl-alt-del.
 */
void ctrl_alt_del(void)
{
	static DECLARE_WORK(cad_work, deferred_cad);

	if (C_A_D)
		schedule_work(&cad_work);
	else
		kill_cad_pid(SIGINT, 1);
}

char poweroff_cmd[POWEROFF_CMD_PATH_LEN] = "/sbin/poweroff";
static const char reboot_cmd[] = "/sbin/reboot";

static int run_cmd(const char *cmd)
{
	char **argv;
	static char *envp[] = {
		"HOME=/",
		"PATH=/sbin:/bin:/usr/sbin:/usr/bin",
		NULL
	};
	int ret;

	argv = argv_split(GFP_KERNEL, cmd, NULL);
	if (argv) {
		ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
		argv_free(argv);
	} else {
		ret = -ENOMEM;
	}

	return ret;
}

static int __orderly_reboot(void)
{
	int ret;

	ret = run_cmd(reboot_cmd);

	if (ret) {
		pr_warn("Failed to start orderly reboot: forcing the issue\n");
		emergency_sync();
		kernel_restart(NULL);
	}

	return ret;
}

static int __orderly_poweroff(bool force)
{
	int ret;

	ret = run_cmd(poweroff_cmd);

	if (ret && force) {
		pr_warn("Failed to start orderly shutdown: forcing the issue\n");

		/*
		 * I guess this should try to kick off some daemon to sync and
		 * poweroff asap. Or not even bother syncing if we're doing an
		 * emergency shutdown?
		 */
		emergency_sync();
		kernel_power_off();
	}

	return ret;
}

static bool poweroff_force;

static void poweroff_work_func(struct work_struct *work)
{
	__orderly_poweroff(poweroff_force);
}

static DECLARE_WORK(poweroff_work, poweroff_work_func);

/**
 * orderly_poweroff - Trigger an orderly system poweroff
 * @force: force poweroff if command execution fails
 *
 * This may be called from any context to trigger a system shutdown.
 * If the orderly shutdown fails and @force is set, an immediate
 * shutdown is forced.
 */
void orderly_poweroff(bool force)
{
	if (force) /* do not override the pending "true" */
		poweroff_force = true;
	schedule_work(&poweroff_work);
}
EXPORT_SYMBOL_GPL(orderly_poweroff);
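
/*
 * Illustrative use only: a driver reacting to a critical condition (a
 * thermal or UPS driver, for example) can request a clean, userspace-driven
 * shutdown from any context and force one if that fails:
 *
 *	orderly_poweroff(true);
 */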

static void reboot_work_func(struct work_struct *work)
{
	__orderly_reboot();
}

static DECLARE_WORK(reboot_work, reboot_work_func);

/**
 * orderly_reboot - Trigger an orderly system reboot
 *
 * This may be called from any context to trigger a system reboot.
 * If the orderly reboot fails, it will force an immediate reboot.
 */
void orderly_reboot(void)
{
	schedule_work(&reboot_work);
}
EXPORT_SYMBOL_GPL(orderly_reboot);

static int __init reboot_setup(char *str)
{
	for (;;) {
		enum reboot_mode *mode;

		/*
		 * Having anything passed on the command line via
		 * reboot= will cause us to disable DMI checking
		 * below.
		 */
		reboot_default = 0;

		if (!strncmp(str, "panic_", 6)) {
			mode = &panic_reboot_mode;
			str += 6;
		} else {
			mode = &reboot_mode;
		}

		switch (*str) {
		case 'w':
			*mode = REBOOT_WARM;
			break;

		case 'c':
			*mode = REBOOT_COLD;
			break;

		case 'h':
			*mode = REBOOT_HARD;
			break;

		case 's':
			if (isdigit(*(str+1)))
				reboot_cpu = simple_strtoul(str+1, NULL, 0);
			else if (str[1] == 'm' && str[2] == 'p' &&
				 isdigit(*(str+3)))
				reboot_cpu = simple_strtoul(str+3, NULL, 0);
			else
				*mode = REBOOT_SOFT;
			if (reboot_cpu >= num_possible_cpus()) {
				pr_err("Ignoring the CPU number in reboot= option. "
				       "CPU %d exceeds possible cpu number %d\n",
				       reboot_cpu, num_possible_cpus());
				reboot_cpu = 0;
				break;
			}
			break;

		case 'g':
			*mode = REBOOT_GPIO;
			break;

		case 'b':
		case 'a':
		case 'k':
		case 't':
		case 'e':
		case 'p':
			reboot_type = *str;
			break;

		case 'f':
			reboot_force = 1;
			break;
		}

		str = strchr(str, ',');
		if (str)
			str++;
		else
			break;
	}
	return 1;
}
__setup("reboot=", reboot_setup);
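
/*
 * Examples of the reboot= parameter as parsed above (illustrative):
 *
 *	reboot=w	select REBOOT_WARM
 *	reboot=h,f	select REBOOT_HARD and set reboot_force
 *	reboot=s2	reboot on CPU 2
 *	reboot=panic_c	use REBOOT_COLD for panic reboots only
 *
 * Only the first character of each comma-separated word is significant,
 * apart from the "s<N>"/"smp<N>" CPU selection and the "panic_" prefix.
 */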