/*
 *  arch/s390/kernel/entry64.S
 *    S390 low-level entry points.
 *
 *    Copyright (C) IBM Corp. 1999,2010
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *		 Hartmut Penner (hp@de.ibm.com),
 *		 Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *		 Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/page.h>

/*
 * Stack layout for the system_call stack entry.
 * The first few entries are identical to the user_regs_struct.
 */
SP_PTREGS    =	STACK_FRAME_OVERHEAD
SP_ARGS      =	STACK_FRAME_OVERHEAD + __PT_ARGS
SP_PSW	     =	STACK_FRAME_OVERHEAD + __PT_PSW
SP_R0	     =	STACK_FRAME_OVERHEAD + __PT_GPRS
SP_R1	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 8
SP_R2	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 16
SP_R3	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 24
SP_R4	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 32
SP_R5	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 40
SP_R6	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 48
SP_R7	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 56
SP_R8	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 64
SP_R9	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 72
SP_R10	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 80
SP_R11	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 88
SP_R12	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 96
SP_R13	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 104
SP_R14	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 112
SP_R15	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 120
SP_ORIG_R2   =	STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2
SP_SVC_CODE  =	STACK_FRAME_OVERHEAD + __PT_SVC_CODE
SP_SIZE      =	STACK_FRAME_OVERHEAD + __PT_SIZE

STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
STACK_SIZE  = 1 << STACK_SHIFT
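/*
 * Worked example (the values are config-dependent, this is just a
 * sketch): with PAGE_SHIFT = 12 and THREAD_ORDER = 2, STACK_SHIFT
 * becomes 14 and STACK_SIZE = 1 << 14 = 16KB, i.e. four pages per
 * kernel stack.
 */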

_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
		 _TIF_MCCK_PENDING | _TIF_PER_TRAP )
_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
		 _TIF_MCCK_PENDING)
_TIF_TRACE    = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
		 _TIF_SYSCALL_TRACEPOINT)
_TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING)
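/*
 * Note: the TIF masks above are tested one byte at a time against the
 * 8-byte __TI_flags field, so the low-order byte is addressed as
 * __TI_flags+7 and higher bytes need a matching right shift, e.g.:
 *	tm	__TI_flags+7(%r12),_TIF_WORK_SVC	# low byte
 *	tm	__TI_flags+6(%r12),_TIF_TRACE >> 8	# next byte up
 */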

#define BASED(name) name-system_call(%r13)
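/*
 * BASED(x) yields a %r13-relative operand for the literal x; paths
 * that use it load %r13 with the address of system_call first, e.g.
 * via "larl %r13,system_call".
 */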

	.macro SPP newpp
#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
	tm	__LC_MACHINE_FLAGS+6,0x20	# MACHINE_FLAG_SPP
	jz	.+8
	.insn	s,0xb2800000,\newpp
#endif
	.endm
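/*
 * The .insn above hand-encodes the S-format SPP (set program
 * parameter) instruction, opcode 0xb280, presumably because not all
 * assemblers know it; the jz .+8 branches over it unless
 * MACHINE_FLAG_SPP is set.
 */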

	.macro	HANDLE_SIE_INTERCEPT
#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
	tm	__TI_flags+6(%r12),_TIF_SIE>>8
	jz	0f
	SPP	__LC_CMF_HPP			# set host id
	clc	SP_PSW+8(8,%r15),BASED(.Lsie_loop)
	jl	0f
	clc	SP_PSW+8(8,%r15),BASED(.Lsie_done)
	jhe	0f
	mvc	SP_PSW+8(8,%r15),BASED(.Lsie_loop)
0:
#endif
	.endm
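/*
 * Effect of HANDLE_SIE_INTERCEPT: if the interrupted thread has
 * _TIF_SIE set and the saved PSW address falls inside the
 * [sie_loop, sie_done) window, the PSW is rewound to sie_loop, so
 * returning from the interrupt re-runs the _TIF_EXIT_SIE check
 * instead of resuming inside the guest.
 */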

#ifdef CONFIG_TRACE_IRQFLAGS
	.macro	TRACE_IRQS_ON
	basr	%r2,%r0
	brasl	%r14,trace_hardirqs_on_caller
	.endm

	.macro	TRACE_IRQS_OFF
	basr	%r2,%r0
	brasl	%r14,trace_hardirqs_off_caller
	.endm
#else
#define TRACE_IRQS_ON
#define TRACE_IRQS_OFF
#endif

#ifdef CONFIG_LOCKDEP
	.macro	LOCKDEP_SYS_EXIT
	tm	SP_PSW+1(%r15),0x01	# returning to user ?
	jz	0f
	brasl	%r14,lockdep_sys_exit
0:
	.endm
#else
#define LOCKDEP_SYS_EXIT
#endif

	.macro	UPDATE_VTIME lc_from,lc_to,lc_sum
	lg	%r10,\lc_from
	slg	%r10,\lc_to
	alg	%r10,\lc_sum
	stg	%r10,\lc_sum
	.endm
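/*
 * UPDATE_VTIME accumulates a CPU-timer delta, roughly
 *	lc_sum += lc_from - lc_to
 * The CPU timer counts down, so the earlier sample minus the later
 * one is the elapsed time, e.g.
 *	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
 * adds the user time spent since the last kernel exit to the user
 * timer.
 */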

/*
 * Register usage in interrupt handlers:
 *    R9  - pointer to current task structure
 *    R13 - pointer to literal pool
 *    R14 - return register for function calls
 *    R15 - kernel stack pointer
 */

	.macro	SAVE_ALL_SVC psworg,savearea
	stmg	%r11,%r15,\savearea
	lg	%r15,__LC_KERNEL_STACK	# problem state -> load ksp
	aghi	%r15,-SP_SIZE		# make room for registers & psw
	lg	%r11,__LC_LAST_BREAK
	.endm

	.macro	SAVE_ALL_PGM psworg,savearea
	stmg	%r11,%r15,\savearea
	tm	\psworg+1,0x01		# test problem state bit
#ifdef CONFIG_CHECK_STACK
	jnz	1f
	tml	%r15,STACK_SIZE - CONFIG_STACK_GUARD
	jnz	2f
	la	%r12,\psworg
	j	stack_overflow
#else
	jz	2f
#endif
1:	lg	%r15,__LC_KERNEL_STACK	# problem state -> load ksp
2:	aghi	%r15,-SP_SIZE		# make room for registers & psw
	larl	%r13,system_call
	lg	%r11,__LC_LAST_BREAK
	.endm

	.macro	SAVE_ALL_ASYNC psworg,savearea
	stmg	%r11,%r15,\savearea
	larl	%r13,system_call
	lg	%r11,__LC_LAST_BREAK
	la	%r12,\psworg
	tm	\psworg+1,0x01		# test problem state bit
	jnz	1f			# from user -> load kernel stack
	clc	\psworg+8(8),BASED(.Lcritical_end)
	jhe	0f
	clc	\psworg+8(8),BASED(.Lcritical_start)
	jl	0f
	brasl	%r14,cleanup_critical
	tm	1(%r12),0x01		# retest problem state after cleanup
	jnz	1f
0:	lg	%r14,__LC_ASYNC_STACK	# are we already on the async. stack ?
	slgr	%r14,%r15
	srag	%r14,%r14,STACK_SHIFT
#ifdef CONFIG_CHECK_STACK
	jnz	1f
	tml	%r15,STACK_SIZE - CONFIG_STACK_GUARD
	jnz	2f
	j	stack_overflow
#else
	jz	2f
#endif
1:	lg	%r15,__LC_ASYNC_STACK	# load async stack
2:	aghi	%r15,-SP_SIZE		# make room for registers & psw
	.endm

	.macro	CREATE_STACK_FRAME savearea
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	stg	%r2,SP_ORIG_R2(%r15)	# store original content of gpr 2
	mvc	SP_R11(40,%r15),\savearea # move %r11-%r15 to stack
	stmg	%r0,%r10,SP_R0(%r15)	# store gprs %r0-%r10 to kernel stack
	.endm

	.macro	RESTORE_ALL psworg,sync
	mvc	\psworg(16),SP_PSW(%r15) # move user PSW to lowcore
	.if !\sync
	ni	\psworg+1,0xfd		# clear wait state bit
	.endif
	lg	%r14,__LC_VDSO_PER_CPU
	lmg	%r0,%r13,SP_R0(%r15)	# load gprs 0-13 of user
	stpt	__LC_EXIT_TIMER
	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
	lmg	%r14,%r15,SP_R14(%r15)	# load gprs 14-15 of user
	lpswe	\psworg			# back to caller
	.endm

	.macro	LAST_BREAK
	srag	%r10,%r11,23
	jz	0f
	stg	%r11,__TI_last_break(%r12)
0:
	.endm
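/*
 * LAST_BREAK stores the last-breaking-event address (%r11, loaded
 * from __LC_LAST_BREAK by the SAVE_ALL_* macros) into thread_info,
 * but only for addresses >= 2^23; the srag by 23 presumably filters
 * out breaks inside the low-mapped kernel image.
 */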

	.macro REENABLE_IRQS
	mvc	__SF_EMPTY(1,%r15),SP_PSW(%r15)
	ni	__SF_EMPTY(%r15),0xbf
	ssm	__SF_EMPTY(%r15)
	.endm

	.section .kprobes.text, "ax"

/*
 * Scheduler resume function, called by switch_to
 *  gpr2 = (task_struct *) prev
 *  gpr3 = (task_struct *) next
 * Returns:
 *  gpr2 = prev
 */
ENTRY(__switch_to)
	lg	%r4,__THREAD_info(%r2)		# get thread_info of prev
	lg	%r5,__THREAD_info(%r3)		# get thread_info of next
	tm	__TI_flags+7(%r4),_TIF_MCCK_PENDING # machine check pending?
	jz	0f
	ni	__TI_flags+7(%r4),255-_TIF_MCCK_PENDING	# clear flag in prev
	oi	__TI_flags+7(%r5),_TIF_MCCK_PENDING	# set it in next
0:	stmg	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
	stg	%r15,__THREAD_ksp(%r2)		# store kernel stack of prev
	lg	%r15,__THREAD_ksp(%r3)		# load kernel stack of next
	lctl	%c4,%c4,__TASK_pid(%r3)		# load pid to control reg. 4
	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
	stg	%r3,__LC_CURRENT		# store task struct of next
	mvc	__LC_CURRENT_PID+4(4,%r0),__TASK_pid(%r3) # store pid of next
	stg	%r5,__LC_THREAD_INFO		# store thread info of next
	aghi	%r5,STACK_SIZE			# end of kernel stack of next
	stg	%r5,__LC_KERNEL_STACK		# store end of kernel stack
	br	%r14

__critical_start:
/*
 * SVC interrupt handler routine. System calls are synchronous events and
 * are executed with interrupts enabled.
 */

ENTRY(system_call)
	stpt	__LC_SYNC_ENTER_TIMER
sysc_saveall:
	SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
	CREATE_STACK_FRAME __LC_SAVE_AREA
	lg	%r12,__LC_THREAD_INFO	# load pointer to thread_info struct
	mvc	SP_PSW(16,%r15),__LC_SVC_OLD_PSW
	mvc	SP_SVC_CODE(4,%r15),__LC_SVC_ILC
	oi	__TI_flags+7(%r12),_TIF_SYSCALL
sysc_vtime:
	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
sysc_stime:
	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
sysc_update:
	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
	LAST_BREAK
sysc_do_svc:
	llgh	%r7,SP_SVC_CODE+2(%r15)
	slag	%r7,%r7,2	# shift and test for svc 0
	jnz	sysc_nr_ok
	# svc 0: system call number in %r1
	llgfr	%r1,%r1		# clear high word in r1
	cghi	%r1,NR_syscalls
	jnl	sysc_nr_ok
	sth	%r1,SP_SVC_CODE+2(%r15)
	slag	%r7,%r1,2	# shift and test for svc 0
sysc_nr_ok:
	larl	%r10,sys_call_table
#ifdef CONFIG_COMPAT
	tm	__TI_flags+5(%r12),(_TIF_31BIT>>16)  # running in 31 bit mode ?
	jno	sysc_noemu
	larl	%r10,sys_call_table_emu  # use 31 bit emulation system calls
sysc_noemu:
#endif
	tm	__TI_flags+6(%r12),_TIF_TRACE >> 8
	mvc	SP_ARGS(8,%r15),SP_R7(%r15)
	lgf	%r8,0(%r7,%r10) # load address of system call routine
	jnz	sysc_tracesys
	basr	%r14,%r8	# call sys_xxxx
	stg	%r2,SP_R2(%r15) # store return value (change R2 on stack)
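#
# Dispatch note: sys_call_table entries are 4 bytes (.long, see the
# SYSCALL define at the end of this file), so the svc number is
# shifted left by 2 to form the table offset; slag also sets the
# condition code, which gives the "svc 0" test above for free.
#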

sysc_return:
	LOCKDEP_SYS_EXIT
sysc_tif:
	tm	SP_PSW+1(%r15),0x01	# returning to user ?
	jno	sysc_restore
	tm	__TI_flags+7(%r12),_TIF_WORK_SVC
	jnz	sysc_work	# there is work to do (signals etc.)
	ni	__TI_flags+7(%r12),255-_TIF_SYSCALL
sysc_restore:
	RESTORE_ALL __LC_RETURN_PSW,1
sysc_done:

#
# One of the work bits is on. Find out which one.
#
sysc_work:
	tm	__TI_flags+7(%r12),_TIF_MCCK_PENDING
	jo	sysc_mcck_pending
	tm	__TI_flags+7(%r12),_TIF_NEED_RESCHED
	jo	sysc_reschedule
	tm	__TI_flags+7(%r12),_TIF_SIGPENDING
	jo	sysc_sigpending
	tm	__TI_flags+7(%r12),_TIF_NOTIFY_RESUME
	jo	sysc_notify_resume
	tm	__TI_flags+7(%r12),_TIF_PER_TRAP
	jo	sysc_singlestep
	j	sysc_return		# beware of critical section cleanup

#
# _TIF_NEED_RESCHED is set, call schedule
#
sysc_reschedule:
	larl	%r14,sysc_return
	jg	schedule		# return point is sysc_return

#
# _TIF_MCCK_PENDING is set, call handler
#
sysc_mcck_pending:
	larl	%r14,sysc_return
	jg	s390_handle_mcck	# TIF bit will be cleared by handler

#
# _TIF_SIGPENDING is set, call do_signal
#
sysc_sigpending:
	ni	__TI_flags+7(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP
	la	%r2,SP_PTREGS(%r15)	# load pt_regs
	brasl	%r14,do_signal		# call do_signal
	tm	__TI_flags+7(%r12),_TIF_SYSCALL
	jno	sysc_return
	lmg	%r2,%r6,SP_R2(%r15)	# load svc arguments
	lghi	%r7,0			# svc 0 returns -ENOSYS
	lh	%r1,SP_SVC_CODE+2(%r15)	# load new svc number
	cghi	%r1,NR_syscalls
	jnl	sysc_nr_ok		# invalid svc number -> do svc 0
	slag	%r7,%r1,2
	j	sysc_nr_ok		# restart svc
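#
# Reaching the restart sequence above means do_signal left
# _TIF_SYSCALL set, i.e. it arranged for a system call restart: the
# (possibly rewritten) svc number is reloaded from the stack and
# re-dispatched through sysc_nr_ok; an out-of-range number falls back
# to svc 0, which returns -ENOSYS.
#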

#
# _TIF_NOTIFY_RESUME is set, call do_notify_resume
#
sysc_notify_resume:
	la	%r2,SP_PTREGS(%r15)	# load pt_regs
	larl	%r14,sysc_return
	jg	do_notify_resume	# call do_notify_resume

#
# _TIF_PER_TRAP is set, call do_per_trap
#
sysc_singlestep:
	ni	__TI_flags+7(%r12),255-(_TIF_SYSCALL | _TIF_PER_TRAP)
	la	%r2,SP_PTREGS(%r15)	# address of register-save area
	larl	%r14,sysc_return	# load adr. of system return
	jg	do_per_trap

#
# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
# and after the system call
#
sysc_tracesys:
	la	%r2,SP_PTREGS(%r15)	# load pt_regs
	la	%r3,0
	llgh	%r0,SP_SVC_CODE+2(%r15)
	stg	%r0,SP_R2(%r15)
	brasl	%r14,do_syscall_trace_enter
	lghi	%r0,NR_syscalls
	clgr	%r0,%r2
	jnh	sysc_tracenogo
	sllg	%r7,%r2,2		# svc number *4
	lgf	%r8,0(%r7,%r10)
sysc_tracego:
	lmg	%r3,%r6,SP_R3(%r15)
	mvc	SP_ARGS(8,%r15),SP_R7(%r15)
	lg	%r2,SP_ORIG_R2(%r15)
	basr	%r14,%r8		# call sys_xxx
	stg	%r2,SP_R2(%r15)		# store return value
sysc_tracenogo:
	tm	__TI_flags+6(%r12),_TIF_TRACE >> 8
	jz	sysc_return
	la	%r2,SP_PTREGS(%r15)	# load pt_regs
	larl	%r14,sysc_return	# return point is sysc_return
	jg	do_syscall_trace_exit

#
# a new process exits the kernel with ret_from_fork
#
ENTRY(ret_from_fork)
	lg	%r13,__LC_SVC_NEW_PSW+8
	lg	%r12,__LC_THREAD_INFO	# load pointer to thread_info struct
	tm	SP_PSW+1(%r15),0x01	# forking a kernel thread ?
	jo	0f
	stg	%r15,SP_R15(%r15)	# store stack pointer for new kthread
0:	brasl	%r14,schedule_tail
	TRACE_IRQS_ON
	stosm	24(%r15),0x03		# reenable interrupts
	j	sysc_tracenogo

#
# the kernel_execve function needs to deal with a pt_regs that is not
# at the usual place
#
ENTRY(kernel_execve)
	stmg	%r12,%r15,96(%r15)
	lgr	%r14,%r15
	aghi	%r15,-SP_SIZE
	stg	%r14,__SF_BACKCHAIN(%r15)
	la	%r12,SP_PTREGS(%r15)
	xc	0(__PT_SIZE,%r12),0(%r12)
	lgr	%r5,%r12
	brasl	%r14,do_execve
	ltgfr	%r2,%r2
	je	0f
	aghi	%r15,SP_SIZE
	lmg	%r12,%r15,96(%r15)
	br	%r14
	# execve succeeded.
0:	stnsm	__SF_EMPTY(%r15),0xfc	# disable interrupts
	lg	%r15,__LC_KERNEL_STACK	# load ksp
	aghi	%r15,-SP_SIZE		# make room for registers & psw
	lg	%r13,__LC_SVC_NEW_PSW+8
	mvc	SP_PTREGS(__PT_SIZE,%r15),0(%r12)	# copy pt_regs
	lg	%r12,__LC_THREAD_INFO
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
	brasl	%r14,execve_tail
	j	sysc_return

/*
 * Program check handler routine
 */

ENTRY(pgm_check_handler)
/*
 * First we need to check for a special case:
 * Single stepping an instruction that disables the PER event mask will
 * cause a PER event AFTER the mask has been set. Example: SVC or LPSW.
 * For a single stepped SVC the program check handler gets control after
 * the SVC new PSW has been loaded. But we want to execute the SVC first and
 * then handle the PER event. Therefore we update the SVC old PSW to point
 * to the pgm_check_handler and branch to the SVC handler after we checked
 * if we have to load the kernel stack register.
 * For every other possible cause for PER event without the PER mask set
 * we just ignore the PER event (FIXME: is there anything we have to do
 * for LPSW?).
 */
	stpt	__LC_SYNC_ENTER_TIMER
	tm	__LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
	jnz	pgm_per 		 # got per exception -> special case
	SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA
	CREATE_STACK_FRAME __LC_SAVE_AREA
	mvc	SP_PSW(16,%r15),__LC_PGM_OLD_PSW
	lg	%r12,__LC_THREAD_INFO	# load pointer to thread_info struct
	HANDLE_SIE_INTERCEPT
	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
	jz	pgm_no_vtime
	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
	LAST_BREAK
pgm_no_vtime:
	stg	%r11,SP_ARGS(%r15)
	lgf	%r3,__LC_PGM_ILC	# load program interruption code
	lg	%r4,__LC_TRANS_EXC_CODE
	REENABLE_IRQS
	lghi	%r8,0x7f
	ngr	%r8,%r3
	sll	%r8,3
	larl	%r1,pgm_check_table
	lg	%r1,0(%r8,%r1)		# load address of handler routine
	la	%r2,SP_PTREGS(%r15)	# address of register-save area
	basr	%r14,%r1		# branch to interrupt-handler
pgm_exit:
	j	sysc_return

#
# handle per exception
#
pgm_per:
	tm	__LC_PGM_OLD_PSW,0x40	# test if per event recording is on
	jnz	pgm_per_std		# ok, normal per event from user space
# ok, it's one of the special cases, now we need to find out which one
	clc	__LC_PGM_OLD_PSW(16),__LC_SVC_NEW_PSW
	je	pgm_svcper
# no interesting special case, ignore PER event
	lpswe	__LC_PGM_OLD_PSW

#
# Normal per exception
#
pgm_per_std:
	SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA
	CREATE_STACK_FRAME __LC_SAVE_AREA
	mvc	SP_PSW(16,%r15),__LC_PGM_OLD_PSW
	lg	%r12,__LC_THREAD_INFO	# load pointer to thread_info struct
	HANDLE_SIE_INTERCEPT
	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
	jz	pgm_no_vtime2
	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
	LAST_BREAK
pgm_no_vtime2:
	lg	%r1,__TI_task(%r12)
	tm	SP_PSW+1(%r15),0x01	# kernel per event ?
	jz	kernel_per
	mvc	__THREAD_per_cause(2,%r1),__LC_PER_CAUSE
	mvc	__THREAD_per_address(8,%r1),__LC_PER_ADDRESS
	mvc	__THREAD_per_paid(1,%r1),__LC_PER_PAID
	oi	__TI_flags+7(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP
	lgf	%r3,__LC_PGM_ILC	# load program interruption code
	lg	%r4,__LC_TRANS_EXC_CODE
	REENABLE_IRQS
	lghi	%r8,0x7f
	ngr	%r8,%r3			# clear per-event-bit and ilc
	je	pgm_exit2
	sll	%r8,3
	larl	%r1,pgm_check_table
	lg	%r1,0(%r8,%r1)		# load address of handler routine
	la	%r2,SP_PTREGS(%r15)	# address of register-save area
	basr	%r14,%r1		# branch to interrupt-handler
pgm_exit2:
	j	sysc_return

#
# it was a single-stepped SVC that is causing all the trouble
#
pgm_svcper:
	SAVE_ALL_PGM __LC_SVC_OLD_PSW,__LC_SAVE_AREA
	CREATE_STACK_FRAME __LC_SAVE_AREA
	lg	%r12,__LC_THREAD_INFO	# load pointer to thread_info struct
	mvc	SP_PSW(16,%r15),__LC_SVC_OLD_PSW
	mvc	SP_SVC_CODE(4,%r15),__LC_SVC_ILC
	oi	__TI_flags+7(%r12),(_TIF_SYSCALL | _TIF_PER_TRAP)
	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
	LAST_BREAK
	lg	%r8,__TI_task(%r12)
	mvc	__THREAD_per_cause(2,%r8),__LC_PER_CAUSE
	mvc	__THREAD_per_address(8,%r8),__LC_PER_ADDRESS
	mvc	__THREAD_per_paid(1,%r8),__LC_PER_PAID
	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
	lmg	%r2,%r6,SP_R2(%r15)	# load svc arguments
	j	sysc_do_svc

#
# the PER event occurred in kernel mode, must be kprobes
#
kernel_per:
	REENABLE_IRQS
	la	%r2,SP_PTREGS(%r15)	# address of register-save area
	brasl	%r14,do_per_trap
	j	pgm_exit

/*
 * IO interrupt handler routine
 */
ENTRY(io_int_handler)
	stck	__LC_INT_CLOCK
	stpt	__LC_ASYNC_ENTER_TIMER
	SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+40
	CREATE_STACK_FRAME __LC_SAVE_AREA+40
	mvc	SP_PSW(16,%r15),0(%r12)	# move user PSW to stack
	lg	%r12,__LC_THREAD_INFO	# load pointer to thread_info struct
	HANDLE_SIE_INTERCEPT
	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
	jz	io_no_vtime
	UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
	LAST_BREAK
io_no_vtime:
	TRACE_IRQS_OFF
	la	%r2,SP_PTREGS(%r15)	# address of register-save area
	brasl	%r14,do_IRQ		# call standard irq handler
io_return:
	LOCKDEP_SYS_EXIT
	TRACE_IRQS_ON
io_tif:
	tm	__TI_flags+7(%r12),_TIF_WORK_INT
	jnz	io_work 		# there is work to do (signals etc.)
io_restore:
	RESTORE_ALL __LC_RETURN_PSW,0
io_done:

#
# There is work to do; find out in which context we have been interrupted:
# 1) if we return to user space we can do all _TIF_WORK_INT work
# 2) if we return to kernel code and kvm is enabled check if we need to
#    modify the psw to leave SIE
# 3) if we return to kernel code and preemptive scheduling is enabled check
#    the preemption counter and if it is zero call preempt_schedule_irq
# Before any work can be done, a switch to the kernel stack is required.
#
io_work:
	tm	SP_PSW+1(%r15),0x01	# returning to user ?
	jo	io_work_user		# yes -> do resched & signal
#ifdef CONFIG_PREEMPT
	# check for preemptive scheduling
	icm	%r0,15,__TI_precount(%r12)
	jnz	io_restore		# preemption is disabled
	tm	__TI_flags+7(%r12),_TIF_NEED_RESCHED
	jno	io_restore
	# switch to kernel stack
	lg	%r1,SP_R15(%r15)
	aghi	%r1,-SP_SIZE
	mvc	SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
	lgr	%r15,%r1
	# TRACE_IRQS_ON already done at io_return, call
	# TRACE_IRQS_OFF to keep things symmetrical
	TRACE_IRQS_OFF
	brasl	%r14,preempt_schedule_irq
	j	io_return
#else
	j	io_restore
#endif

#
# Need to do work before returning to userspace, switch to kernel stack
#
io_work_user:
	lg	%r1,__LC_KERNEL_STACK
	aghi	%r1,-SP_SIZE
	mvc	SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
	lgr	%r15,%r1

#
# One of the work bits is on. Find out which one.
# Checked are: _TIF_SIGPENDING, _TIF_NOTIFY_RESUME, _TIF_NEED_RESCHED
#	       and _TIF_MCCK_PENDING
#
io_work_tif:
	tm	__TI_flags+7(%r12),_TIF_MCCK_PENDING
	jo	io_mcck_pending
	tm	__TI_flags+7(%r12),_TIF_NEED_RESCHED
	jo	io_reschedule
	tm	__TI_flags+7(%r12),_TIF_SIGPENDING
	jo	io_sigpending
	tm	__TI_flags+7(%r12),_TIF_NOTIFY_RESUME
	jo	io_notify_resume
	j	io_return		# beware of critical section cleanup

#
# _TIF_MCCK_PENDING is set, call handler
#
io_mcck_pending:
	# TRACE_IRQS_ON already done at io_return
	brasl	%r14,s390_handle_mcck	# TIF bit will be cleared by handler
	TRACE_IRQS_OFF
	j	io_return

#
# _TIF_NEED_RESCHED is set, call schedule
#
io_reschedule:
	# TRACE_IRQS_ON already done at io_return
	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
	brasl	%r14,schedule		# call scheduler
	stnsm	__SF_EMPTY(%r15),0xfc	# disable I/O and ext. interrupts
	TRACE_IRQS_OFF
	j	io_return

#
# _TIF_SIGPENDING is set, call do_signal
#
io_sigpending:
	# TRACE_IRQS_ON already done at io_return
	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
	la	%r2,SP_PTREGS(%r15)	# load pt_regs
	brasl	%r14,do_signal		# call do_signal
	stnsm	__SF_EMPTY(%r15),0xfc	# disable I/O and ext. interrupts
	TRACE_IRQS_OFF
	j	io_return

#
# _TIF_NOTIFY_RESUME is set, call do_notify_resume
#
io_notify_resume:
	# TRACE_IRQS_ON already done at io_return
	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
	la	%r2,SP_PTREGS(%r15)	# load pt_regs
	brasl	%r14,do_notify_resume	# call do_notify_resume
	stnsm	__SF_EMPTY(%r15),0xfc	# disable I/O and ext. interrupts
	TRACE_IRQS_OFF
	j	io_return

/*
 * External interrupt handler routine
 */
ENTRY(ext_int_handler)
	stck	__LC_INT_CLOCK
	stpt	__LC_ASYNC_ENTER_TIMER
	SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+40
	CREATE_STACK_FRAME __LC_SAVE_AREA+40
	mvc	SP_PSW(16,%r15),0(%r12)	# move user PSW to stack
	lg	%r12,__LC_THREAD_INFO	# load pointer to thread_info struct
	HANDLE_SIE_INTERCEPT
	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
	jz	ext_no_vtime
	UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
	LAST_BREAK
ext_no_vtime:
	TRACE_IRQS_OFF
	lghi	%r1,4096
	la	%r2,SP_PTREGS(%r15)	# address of register-save area
	llgf	%r3,__LC_CPU_ADDRESS	# get cpu address + interruption code
	llgf	%r4,__LC_EXT_PARAMS	# get external parameter
	lg	%r5,__LC_EXT_PARAMS2-4096(%r1)	# get 64 bit external parameter
	brasl	%r14,do_extint
	j	io_return

__critical_end:

/*
 * Machine check handler routines
 */
ENTRY(mcck_int_handler)
	stck	__LC_MCCK_CLOCK
	la	%r1,4095		# revalidate r1
	spt	__LC_CPU_TIMER_SAVE_AREA-4095(%r1)	# revalidate cpu timer
	lmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
	stmg	%r11,%r15,__LC_SAVE_AREA+80
	larl	%r13,system_call
	lg	%r11,__LC_LAST_BREAK
	la	%r12,__LC_MCK_OLD_PSW
	tm	__LC_MCCK_CODE,0x80	# system damage?
	jo	mcck_int_main		# yes -> rest of mcck code invalid
	la	%r14,4095
	mvc	__LC_MCCK_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA-4095(%r14)
	tm	__LC_MCCK_CODE+5,0x02	# stored cpu timer value valid?
	jo	1f
	la	%r14,__LC_SYNC_ENTER_TIMER
	clc	0(8,%r14),__LC_ASYNC_ENTER_TIMER
	jl	0f
	la	%r14,__LC_ASYNC_ENTER_TIMER
0:	clc	0(8,%r14),__LC_EXIT_TIMER
	jl	0f
	la	%r14,__LC_EXIT_TIMER
0:	clc	0(8,%r14),__LC_LAST_UPDATE_TIMER
	jl	0f
	la	%r14,__LC_LAST_UPDATE_TIMER
0:	spt	0(%r14)
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
1:	tm	__LC_MCCK_CODE+2,0x09	# mwp + ia of old psw valid?
	jno	mcck_int_main		# no -> skip cleanup critical
	tm	__LC_MCK_OLD_PSW+1,0x01 # test problem state bit
	jnz	mcck_int_main		# from user -> load kernel stack
	clc	__LC_MCK_OLD_PSW+8(8),BASED(.Lcritical_end)
	jhe	mcck_int_main
	clc	__LC_MCK_OLD_PSW+8(8),BASED(.Lcritical_start)
	jl	mcck_int_main
	brasl	%r14,cleanup_critical
mcck_int_main:
	lg	%r14,__LC_PANIC_STACK	# are we already on the panic stack?
	slgr	%r14,%r15
	srag	%r14,%r14,PAGE_SHIFT
	jz	0f
	lg	%r15,__LC_PANIC_STACK	# load panic stack
0:	aghi	%r15,-SP_SIZE		# make room for registers & psw
	CREATE_STACK_FRAME __LC_SAVE_AREA+80
	mvc	SP_PSW(16,%r15),0(%r12)
	lg	%r12,__LC_THREAD_INFO	# load pointer to thread_info struct
	tm	__LC_MCCK_CODE+2,0x08	# mwp of old psw valid?
	jno	mcck_no_vtime		# no -> no timer update
	HANDLE_SIE_INTERCEPT
	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
	jz	mcck_no_vtime
	UPDATE_VTIME __LC_EXIT_TIMER,__LC_MCCK_ENTER_TIMER,__LC_USER_TIMER
	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_MCCK_ENTER_TIMER
	LAST_BREAK
mcck_no_vtime:
	la	%r2,SP_PTREGS(%r15)	# load pt_regs
	brasl	%r14,s390_do_machine_check
	tm	SP_PSW+1(%r15),0x01	# returning to user ?
	jno	mcck_return
	lg	%r1,__LC_KERNEL_STACK	# switch to kernel stack
	aghi	%r1,-SP_SIZE
	mvc	SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
	lgr	%r15,%r1
	stosm	__SF_EMPTY(%r15),0x04	# turn dat on
	tm	__TI_flags+7(%r12),_TIF_MCCK_PENDING
	jno	mcck_return
	TRACE_IRQS_OFF
	brasl	%r14,s390_handle_mcck
	TRACE_IRQS_ON
mcck_return:
	mvc	__LC_RETURN_MCCK_PSW(16),SP_PSW(%r15) # move return PSW
	ni	__LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit
	lmg	%r0,%r15,SP_R0(%r15)	# load gprs 0-15
	tm	__LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
	jno	0f
	stpt	__LC_EXIT_TIMER
0:	lpswe	__LC_RETURN_MCCK_PSW	# back to caller
mcck_done:

/*
 * Restart interruption handler, kick starter for additional CPUs
 */
#ifdef CONFIG_SMP
	__CPUINIT
ENTRY(restart_int_handler)
	basr	%r1,0
restart_base:
	spt	restart_vtime-restart_base(%r1)
	stck	__LC_LAST_UPDATE_CLOCK
	mvc	__LC_LAST_UPDATE_TIMER(8),restart_vtime-restart_base(%r1)
	mvc	__LC_EXIT_TIMER(8),restart_vtime-restart_base(%r1)
	lg	%r15,__LC_SAVE_AREA+120 # load ksp
	lghi	%r10,__LC_CREGS_SAVE_AREA
	lctlg	%c0,%c15,0(%r10) # get new ctl regs
	lghi	%r10,__LC_AREGS_SAVE_AREA
	lam	%a0,%a15,0(%r10)
	lmg	%r6,%r15,__SF_GPRS(%r15) # load registers from clone
	lg	%r1,__LC_THREAD_INFO
	mvc	__LC_USER_TIMER(8),__TI_user_timer(%r1)
	mvc	__LC_SYSTEM_TIMER(8),__TI_system_timer(%r1)
	xc	__LC_STEAL_TIMER(8),__LC_STEAL_TIMER
	stosm	__SF_EMPTY(%r15),0x04	# now we can turn dat on
	brasl	%r14,start_secondary
	.align	8
restart_vtime:
	.long	0x7fffffff,0xffffffff
	.previous
#else
/*
 * If we do not run with SMP enabled, let the new CPU crash ...
 */
ENTRY(restart_int_handler)
	basr	%r1,0
restart_base:
	lpswe	restart_crash-restart_base(%r1)
	.align 8
restart_crash:
	.long  0x000a0000,0x00000000,0x00000000,0x00000000
restart_go:
#endif

#
# PSW restart interrupt handler
#
ENTRY(psw_restart_int_handler)
	stg	%r15,__LC_SAVE_AREA+120(%r0)	# save r15
	larl	%r15,restart_stack		# load restart stack
	lg	%r15,0(%r15)
	aghi	%r15,-SP_SIZE			# make room for pt_regs
	stmg	%r0,%r14,SP_R0(%r15)		# store gprs %r0-%r14 to stack
	mvc	SP_R15(8,%r15),__LC_SAVE_AREA+120(%r0)# store saved %r15 to stack
	mvc	SP_PSW(16,%r15),__LC_RST_OLD_PSW(%r0)# store restart old psw
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # set backchain to 0
	brasl	%r14,do_restart

	larl	%r14,restart_psw_crash		# load disabled wait PSW if
	lpswe	0(%r14)				# do_restart returns
	.align 8
restart_psw_crash:
	.quad	0x0002000080000000,0x0000000000000000 + restart_psw_crash
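# restart_psw_crash is a disabled-wait PSW whose address part points
# at itself: if do_restart ever returns, the CPU loads it and stops.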

	.section .kprobes.text, "ax"

#ifdef CONFIG_CHECK_STACK
/*
 * The synchronous or the asynchronous stack overflowed. We are dead.
 * No need to properly save the registers, we are going to panic anyway.
 * Set up a pt_regs so that show_trace can provide a good call trace.
 */
stack_overflow:
	lg	%r15,__LC_PANIC_STACK	# change to panic stack
	aghi	%r15,-SP_SIZE
	mvc	SP_PSW(16,%r15),0(%r12)	# move user PSW to stack
	stmg	%r0,%r10,SP_R0(%r15)	# store gprs %r0-%r10 to kernel stack
	la	%r1,__LC_SAVE_AREA
	chi	%r12,__LC_SVC_OLD_PSW
	je	0f
	chi	%r12,__LC_PGM_OLD_PSW
	je	0f
	la	%r1,__LC_SAVE_AREA+40
0:	mvc	SP_R11(40,%r15),0(%r1)	# move %r11-%r15 to stack
	mvc	SP_ARGS(8,%r15),__LC_LAST_BREAK
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # clear back chain
	la	%r2,SP_PTREGS(%r15)	# load pt_regs
	jg	kernel_stack_overflow
#endif

cleanup_table_system_call:
	.quad	system_call, sysc_do_svc
cleanup_table_sysc_tif:
	.quad	sysc_tif, sysc_restore
cleanup_table_sysc_restore:
	.quad	sysc_restore, sysc_done
cleanup_table_io_tif:
	.quad	io_tif, io_restore
cleanup_table_io_restore:
	.quad	io_restore, io_done
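#
# cleanup_critical: %r12 points at the old PSW of the interrupt that
# hit the critical section; its address part (8(%r12)) is compared
# against the [start,end) pairs above. On a match the corresponding
# cleanup routine completes or replays the interrupted entry/exit
# sequence and leaves the PSW to continue with in __LC_RETURN_PSW,
# pointed to by %r12.
#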

cleanup_critical:
	clc	8(8,%r12),BASED(cleanup_table_system_call)
	jl	0f
	clc	8(8,%r12),BASED(cleanup_table_system_call+8)
	jl	cleanup_system_call
0:
	clc	8(8,%r12),BASED(cleanup_table_sysc_tif)
	jl	0f
	clc	8(8,%r12),BASED(cleanup_table_sysc_tif+8)
	jl	cleanup_sysc_tif
0:
	clc	8(8,%r12),BASED(cleanup_table_sysc_restore)
	jl	0f
	clc	8(8,%r12),BASED(cleanup_table_sysc_restore+8)
	jl	cleanup_sysc_restore
0:
	clc	8(8,%r12),BASED(cleanup_table_io_tif)
	jl	0f
	clc	8(8,%r12),BASED(cleanup_table_io_tif+8)
	jl	cleanup_io_tif
0:
	clc	8(8,%r12),BASED(cleanup_table_io_restore)
	jl	0f
	clc	8(8,%r12),BASED(cleanup_table_io_restore+8)
	jl	cleanup_io_restore
0:
	br	%r14

cleanup_system_call:
	mvc	__LC_RETURN_PSW(16),0(%r12)
	clc	__LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+8)
	jh	0f
	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
	cghi	%r12,__LC_MCK_OLD_PSW
	je	0f
	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
0:	cghi	%r12,__LC_MCK_OLD_PSW
	la	%r12,__LC_SAVE_AREA+80
	je	0f
	la	%r12,__LC_SAVE_AREA+40
0:	clc	__LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+16)
	jhe	cleanup_vtime
	clc	__LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn)
	jh	0f
	mvc	__LC_SAVE_AREA(40),0(%r12)
0:	lg	%r15,__LC_KERNEL_STACK	# problem state -> load ksp
	aghi	%r15,-SP_SIZE		# make room for registers & psw
	stg	%r15,32(%r12)
	stg	%r11,0(%r12)
	CREATE_STACK_FRAME __LC_SAVE_AREA
	mvc	8(8,%r12),__LC_THREAD_INFO
	lg	%r12,__LC_THREAD_INFO
	mvc	SP_PSW(16,%r15),__LC_SVC_OLD_PSW
	mvc	SP_SVC_CODE(4,%r15),__LC_SVC_ILC
	oi	__TI_flags+7(%r12),_TIF_SYSCALL
cleanup_vtime:
	clc	__LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+24)
	jhe	cleanup_stime
	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
cleanup_stime:
	clc	__LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+32)
	jh	cleanup_update
	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
cleanup_update:
	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
	srag	%r12,%r11,23
	lg	%r12,__LC_THREAD_INFO
	jz	0f
	stg	%r11,__TI_last_break(%r12)
0:	mvc	__LC_RETURN_PSW+8(8),BASED(cleanup_table_system_call+8)
	la	%r12,__LC_RETURN_PSW
	br	%r14
cleanup_system_call_insn:
	.quad	sysc_saveall
	.quad	system_call
	.quad	sysc_vtime
	.quad	sysc_stime
	.quad	sysc_update

cleanup_sysc_tif:
	mvc	__LC_RETURN_PSW(8),0(%r12)
	mvc	__LC_RETURN_PSW+8(8),BASED(cleanup_table_sysc_tif)
	la	%r12,__LC_RETURN_PSW
	br	%r14

cleanup_sysc_restore:
	clc	8(8,%r12),BASED(cleanup_sysc_restore_insn)
	je	2f
	clc	8(8,%r12),BASED(cleanup_sysc_restore_insn+8)
	jhe	0f
	mvc	__LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
	cghi	%r12,__LC_MCK_OLD_PSW
	je	0f
	mvc	__LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
0:	mvc	__LC_RETURN_PSW(16),SP_PSW(%r15)
	cghi	%r12,__LC_MCK_OLD_PSW
	la	%r12,__LC_SAVE_AREA+80
	je	1f
	la	%r12,__LC_SAVE_AREA+40
1:	mvc	0(40,%r12),SP_R11(%r15)
	lmg	%r0,%r10,SP_R0(%r15)
	lg	%r15,SP_R15(%r15)
2:	la	%r12,__LC_RETURN_PSW
	br	%r14
cleanup_sysc_restore_insn:
	.quad	sysc_done - 4
	.quad	sysc_done - 16

cleanup_io_tif:
	mvc	__LC_RETURN_PSW(8),0(%r12)
	mvc	__LC_RETURN_PSW+8(8),BASED(cleanup_table_io_tif)
	la	%r12,__LC_RETURN_PSW
	br	%r14

cleanup_io_restore:
	clc	8(8,%r12),BASED(cleanup_io_restore_insn)
	je	1f
	clc	8(8,%r12),BASED(cleanup_io_restore_insn+8)
	jhe	0f
	mvc	__LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
0:	mvc	__LC_RETURN_PSW(16),SP_PSW(%r15)
	mvc	__LC_SAVE_AREA+80(40),SP_R11(%r15)
	lmg	%r0,%r10,SP_R0(%r15)
	lg	%r15,SP_R15(%r15)
1:	la	%r12,__LC_RETURN_PSW
	br	%r14
cleanup_io_restore_insn:
	.quad	io_done - 4
	.quad	io_done - 16

/*
 * Integer constants
 */
		.align	4
.Lcritical_start:
		.quad	__critical_start
.Lcritical_end:
		.quad	__critical_end

#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
/*
 * sie64a calling convention:
 * %r2 pointer to sie control block
 * %r3 guest register save area
 */
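/*
 * Rough C-level view (a sketch; the authoritative prototype lives in
 * the KVM headers):
 *	int sie64a(struct kvm_s390_sie_block *sie_block, u64 *guest_gprs);
 * It returns 0 on a regular SIE exit and -EFAULT, via sie_fault and
 * the __ex_table entry below, if the SIE instruction itself faults.
 */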
ENTRY(sie64a)
	stmg	%r6,%r14,__SF_GPRS(%r15)	# save kernel registers
	stg	%r2,__SF_EMPTY(%r15)		# save control block pointer
	stg	%r3,__SF_EMPTY+8(%r15)		# save guest register save area
	lmg	%r0,%r13,0(%r3)			# load guest gprs 0-13
	lg	%r14,__LC_THREAD_INFO		# pointer thread_info struct
	oi	__TI_flags+6(%r14),_TIF_SIE>>8
sie_loop:
	lg	%r14,__LC_THREAD_INFO		# pointer thread_info struct
	tm	__TI_flags+7(%r14),_TIF_EXIT_SIE
	jnz	sie_exit
	lg	%r14,__LC_GMAP			# get gmap pointer
	ltgr	%r14,%r14
	jz	sie_gmap
	lctlg	%c1,%c1,__GMAP_ASCE(%r14)	# load primary asce
sie_gmap:
	lg	%r14,__SF_EMPTY(%r15)		# get control block pointer
	SPP	__SF_EMPTY(%r15)		# set guest id
	sie	0(%r14)
sie_done:
	SPP	__LC_CMF_HPP			# set host id
	lg	%r14,__LC_THREAD_INFO		# pointer thread_info struct
sie_exit:
	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
	ni	__TI_flags+6(%r14),255-(_TIF_SIE>>8)
	lg	%r14,__SF_EMPTY+8(%r15)		# load guest register save area
	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
	lmg	%r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
	lghi	%r2,0
	br	%r14
sie_fault:
	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
	lg	%r14,__LC_THREAD_INFO		# pointer thread_info struct
	ni	__TI_flags+6(%r14),255-(_TIF_SIE>>8)
	lg	%r14,__SF_EMPTY+8(%r15)		# load guest register save area
	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
	lmg	%r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
	lghi	%r2,-EFAULT
	br	%r14

	.align	8
.Lsie_loop:
	.quad	sie_loop
.Lsie_done:
	.quad	sie_done

	.section __ex_table,"a"
	.quad	sie_loop,sie_fault
	.previous
#endif

		.section .rodata, "a"
#define SYSCALL(esa,esame,emu)	.long esame
	.globl	sys_call_table
sys_call_table:
#include "syscalls.S"
#undef SYSCALL

#ifdef CONFIG_COMPAT

#define SYSCALL(esa,esame,emu)	.long emu
sys_call_table_emu:
#include "syscalls.S"
#undef SYSCALL
#endif