
apply grsec

parazyd 2 years ago
commit 9c8e82a6f6
100 changed files with 1667 additions and 417 deletions
  1. Documentation/dontdiff (+45 -3)
  2. Documentation/kbuild/makefiles.txt (+31 -8)
  3. Documentation/kernel-parameters.txt (+39 -0)
  4. Documentation/sysctl/kernel.txt (+15 -0)
  5. Makefile (+40 -15)
  6. arch/alpha/include/asm/atomic.h (+10 -0)
  7. arch/alpha/include/asm/cache.h (+2 -2)
  8. arch/alpha/include/asm/elf.h (+7 -0)
  9. arch/alpha/include/asm/pgalloc.h (+6 -0)
  10. arch/alpha/include/asm/pgtable.h (+11 -0)
  11. arch/alpha/kernel/module.c (+1 -1)
  12. arch/alpha/kernel/osf_sys.c (+12 -6)
  13. arch/alpha/mm/fault.c (+140 -1)
  14. arch/arc/Kconfig (+1 -0)
  15. arch/arm/Kconfig (+3 -1)
  16. arch/arm/Kconfig.debug (+1 -0)
  17. arch/arm/include/asm/atomic.h (+291 -32)
  18. arch/arm/include/asm/cache.h (+4 -1)
  19. arch/arm/include/asm/cacheflush.h (+1 -1)
  20. arch/arm/include/asm/checksum.h (+13 -1)
  21. arch/arm/include/asm/cmpxchg.h (+4 -0)
  22. arch/arm/include/asm/cpuidle.h (+1 -1)
  23. arch/arm/include/asm/domain.h (+38 -4)
  24. arch/arm/include/asm/elf.h (+8 -1)
  25. arch/arm/include/asm/fncpy.h (+2 -0)
  26. arch/arm/include/asm/futex.h (+1 -0)
  27. arch/arm/include/asm/kmap_types.h (+1 -1)
  28. arch/arm/include/asm/mach/dma.h (+1 -1)
  29. arch/arm/include/asm/mach/map.h (+9 -7)
  30. arch/arm/include/asm/outercache.h (+1 -1)
  31. arch/arm/include/asm/page.h (+2 -1)
  32. arch/arm/include/asm/pgalloc.h (+20 -0)
  33. arch/arm/include/asm/pgtable-2level-hwdef.h (+3 -1)
  34. arch/arm/include/asm/pgtable-2level.h (+3 -0)
  35. arch/arm/include/asm/pgtable-3level.h (+3 -0)
  36. arch/arm/include/asm/pgtable.h (+51 -3)
  37. arch/arm/include/asm/smp.h (+1 -1)
  38. arch/arm/include/asm/thread_info.h (+9 -1)
  39. arch/arm/include/asm/tls.h (+3 -0)
  40. arch/arm/include/asm/uaccess.h (+79 -34)
  41. arch/arm/include/uapi/asm/ptrace.h (+1 -1)
  42. arch/arm/kernel/armksyms.c (+1 -1)
  43. arch/arm/kernel/cpuidle.c (+1 -1)
  44. arch/arm/kernel/entry-armv.S (+106 -3)
  45. arch/arm/kernel/entry-common.S (+43 -5)
  46. arch/arm/kernel/entry-header.S (+55 -0)
  47. arch/arm/kernel/fiq.c (+3 -0)
  48. arch/arm/kernel/module-plts.c (+1 -6)
  49. arch/arm/kernel/module.c (+34 -4)
  50. arch/arm/kernel/patch.c (+2 -0)
  51. arch/arm/kernel/process.c (+6 -90)
  52. arch/arm/kernel/ptrace.c (+9 -0)
  53. arch/arm/kernel/reboot.c (+1 -0)
  54. arch/arm/kernel/setup.c (+13 -7)
  55. arch/arm/kernel/signal.c (+1 -34)
  56. arch/arm/kernel/smp.c (+1 -1)
  57. arch/arm/kernel/tcm.c (+3 -1)
  58. arch/arm/kernel/traps.c (+6 -1)
  59. arch/arm/kernel/vmlinux.lds.S (+3 -3)
  60. arch/arm/kvm/arm.c (+4 -4)
  61. arch/arm/lib/copy_page.S (+1 -0)
  62. arch/arm/lib/csumpartialcopyuser.S (+2 -2)
  63. arch/arm/lib/delay.c (+1 -1)
  64. arch/arm/lib/uaccess_with_memcpy.c (+2 -2)
  65. arch/arm/mach-exynos/suspend.c (+4 -2)
  66. arch/arm/mach-mvebu/coherency.c (+2 -2)
  67. arch/arm/mach-omap2/board-n8x0.c (+1 -1)
  68. arch/arm/mach-omap2/omap-mpuss-lowpower.c (+2 -2)
  69. arch/arm/mach-omap2/omap-smp.c (+1 -0)
  70. arch/arm/mach-omap2/omap_device.c (+2 -2)
  71. arch/arm/mach-omap2/omap_device.h (+2 -2)
  72. arch/arm/mach-omap2/omap_hwmod.c (+2 -2)
  73. arch/arm/mach-omap2/powerdomains43xx_data.c (+4 -1)
  74. arch/arm/mach-omap2/wd_timer.c (+3 -3)
  75. arch/arm/mach-shmobile/platsmp-apmu.c (+4 -1)
  76. arch/arm/mach-tegra/cpuidle-tegra20.c (+1 -1)
  77. arch/arm/mach-tegra/irq.c (+1 -0)
  78. arch/arm/mach-ux500/pm.c (+1 -0)
  79. arch/arm/mach-zynq/platsmp.c (+1 -0)
  80. arch/arm/mm/Kconfig (+5 -3)
  81. arch/arm/mm/cache-l2x0.c (+1 -1)
  82. arch/arm/mm/context.c (+5 -5)
  83. arch/arm/mm/fault.c (+162 -0)
  84. arch/arm/mm/fault.h (+12 -0)
  85. arch/arm/mm/init.c (+39 -0)
  86. arch/arm/mm/ioremap.c (+2 -2)
  87. arch/arm/mm/mmap.c (+30 -4)
  88. arch/arm/mm/mmu.c (+120 -42)
  89. arch/arm/mm/pageattr.c (+3 -0)
  90. arch/arm/net/bpf_jit_32.c (+16 -38)
  91. arch/arm/plat-iop/setup.c (+1 -1)
  92. arch/arm/plat-omap/sram.c (+2 -0)
  93. arch/arm64/Kconfig.debug (+1 -0)
  94. arch/arm64/include/asm/atomic.h (+10 -0)
  95. arch/arm64/include/asm/percpu.h (+4 -4)
  96. arch/arm64/include/asm/pgalloc.h (+5 -0)
  97. arch/arm64/include/asm/uaccess.h (+1 -0)
  98. arch/arm64/mm/dma-mapping.c (+1 -1)
  99. arch/avr32/include/asm/cache.h (+3 -1)
  100. arch/avr32/include/asm/elf.h (+0 -0)

+ 45 - 3
Documentation/dontdiff

@@ -3,9 +3,11 @@
 *.bc
 *.bin
 *.bz2
+*.c.[012]*.*
 *.cis
 *.cpio
 *.csp
+*.dbg
 *.dsp
 *.dvi
 *.elf
@@ -15,6 +17,7 @@
 *.gcov
 *.gen.S
 *.gif
+*.gmo
 *.grep
 *.grp
 *.gz
@@ -51,14 +54,17 @@
 *.tab.h
 *.tex
 *.ver
+*.vim
 *.xml
 *.xz
 *_MODULES
+*_reg_safe.h
 *_vga16.c
 *~
 \#*#
 *.9
-.*
+.[^g]*
+.gen*
 .*.d
 .mm
 53c700_d.h
@@ -72,9 +78,11 @@ Image
 Module.markers
 Module.symvers
 PENDING
+PERF*
 SCCS
 System.map*
 TAGS
+TRACEEVENT-CFLAGS
 aconf
 af_names.h
 aic7*reg.h*
@@ -83,6 +91,7 @@ aic7*seq.h*
 aicasm
 aicdb.h*
 altivec*.c
+ashldi3.S
 asm-offsets.h
 asm_offsets.h
 autoconf.h*
@@ -95,32 +104,40 @@ bounds.h
 bsetup
 btfixupprep
 build
+builtin-policy.h
 bvmlinux
 bzImage*
 capability_names.h
 capflags.c
 classlist.h*
+clut_vga16.c
+common-cmds.h
 comp*.log
 compile.h*
 conf
 config
 config-*
 config_data.h*
+config.c
 config.mak
 config.mak.autogen
+config.tmp
 conmakehash
 consolemap_deftbl.c*
 cpustr.h
 crc32table.h*
 cscope.*
 defkeymap.c
+devicetable-offsets.h
 devlist.h*
 dnotify_test
 docproc
 dslm
+dtc-lexer.lex.c
 elf2ecoff
 elfconfig.h*
 evergreen_reg_safe.h
+exception_policy.conf
 fixdep
 flask.h
 fore200e_mkfirm
@@ -128,12 +145,15 @@ fore200e_pca_fw.c*
 gconf
 gconf.glade.h
 gen-devlist
+gen-kdb_cmds.c
 gen_crc32table
 gen_init_cpio
 generated
 genheaders
 genksyms
 *_gray256.c
+hash
+hid-example
 hpet_example
 hugepage-mmap
 hugepage-shm
@@ -148,14 +168,14 @@ int32.c
 int4.c
 int8.c
 kallsyms
-kconfig
+kern_constants.h
 keywords.c
 ksym.c*
 ksym.h*
 kxgettext
 lex.c
 lex.*.c
-linux
+lib1funcs.S
 logo_*.c
 logo_*_clut224.c
 logo_*_mono.c
@@ -166,12 +186,14 @@ machtypes.h
 map
 map_hugetlb
 mconf
+mdp
 miboot*
 mk_elfconfig
 mkboot
 mkbugboot
 mkcpustr
 mkdep
+mkpiggy
 mkprep
 mkregtable
 mktables
@@ -187,6 +209,8 @@ oui.c*
 page-types
 parse.c
 parse.h
+parse-events*
+pasyms.h
 patches*
 pca200e.bin
 pca200e_ecd.bin2
@@ -196,6 +220,7 @@ perf-archive
 piggyback
 piggy.gzip
 piggy.S
+pmu-*
 pnmtologo
 ppc_defs.h*
 pss_boot.h
@@ -205,7 +230,12 @@ r200_reg_safe.h
 r300_reg_safe.h
 r420_reg_safe.h
 r600_reg_safe.h
+randomize_layout_hash.h
+randomize_layout_seed.h
+realmode.lds
+realmode.relocs
 recordmcount
+regdb.c
 relocs
 rlim_names.h
 rn50_reg_safe.h
@@ -215,8 +245,12 @@ series
 setup
 setup.bin
 setup.elf
+signing_key*
+size_overflow_hash.h
 sImage
+slabinfo
 sm_tbl*
+sortextable
 split-include
 syscalltab.h
 tables.c
@@ -226,6 +260,7 @@ tftpboot.img
 timeconst.h
 times.h*
 trix_boot.h
+user_constants.h
 utsrelease.h*
 vdso-syms.lds
 vdso.lds
@@ -237,13 +272,17 @@ vdso32.lds
 vdso32.so.dbg
 vdso64.lds
 vdso64.so.dbg
+vdsox32.lds
+vdsox32-syms.lds
 version.h*
 vmImage
 vmlinux
 vmlinux-*
 vmlinux.aout
 vmlinux.bin.all
+vmlinux.bin.bz2
 vmlinux.lds
+vmlinux.relocs
 vmlinuz
 voffset.h
 vsyscall.lds
@@ -251,9 +290,12 @@ vsyscall_32.lds
 wanxlfw.inc
 uImage
 unifdef
+utsrelease.h
 wakeup.bin
 wakeup.elf
 wakeup.lds
+x509*
 zImage*
 zconf.hash.c
+zconf.lex.c
 zoffset.h

+ 31 - 8
Documentation/kbuild/makefiles.txt

@@ -23,10 +23,11 @@ This document describes the Linux kernel Makefiles.
 	=== 4 Host Program support
 	   --- 4.1 Simple Host Program
 	   --- 4.2 Composite Host Programs
-	   --- 4.3 Using C++ for host programs
-	   --- 4.4 Controlling compiler options for host programs
-	   --- 4.5 When host programs are actually built
-	   --- 4.6 Using hostprogs-$(CONFIG_FOO)
+	   --- 4.3 Defining shared libraries
+	   --- 4.4 Using C++ for host programs
+	   --- 4.5 Controlling compiler options for host programs
+	   --- 4.6 When host programs are actually built
+	   --- 4.7 Using hostprogs-$(CONFIG_FOO)
 
 	=== 5 Kbuild clean infrastructure
 
@@ -643,7 +644,29 @@ Both possibilities are described in the following.
 	Finally, the two .o files are linked to the executable, lxdialog.
 	Note: The syntax <executable>-y is not permitted for host-programs.
 
---- 4.3 Using C++ for host programs
+--- 4.3 Defining shared libraries
+
+	Objects with extension .so are considered shared libraries, and
+	will be compiled as position independent objects.
+	Kbuild provides support for shared libraries, but the usage
+	shall be restricted.
+	In the following example the libkconfig.so shared library is used
+	to link the executable conf.
+
+	Example:
+		#scripts/kconfig/Makefile
+		hostprogs-y     := conf
+		conf-objs       := conf.o libkconfig.so
+		libkconfig-objs := expr.o type.o
+
+	Shared libraries always require a corresponding -objs line, and
+	in the example above the shared library libkconfig is composed by
+	the two objects expr.o and type.o.
+	expr.o and type.o will be built as position independent code and
+	linked as a shared library libkconfig.so. C++ is not supported for
+	shared libraries.
+
+--- 4.4 Using C++ for host programs
 
 	kbuild offers support for host programs written in C++. This was
 	introduced solely to support kconfig, and is not recommended
@@ -666,7 +689,7 @@ Both possibilities are described in the following.
 		qconf-cxxobjs := qconf.o
 		qconf-objs    := check.o
 
---- 4.4 Controlling compiler options for host programs
+--- 4.5 Controlling compiler options for host programs
 
 	When compiling host programs, it is possible to set specific flags.
 	The programs will always be compiled utilising $(HOSTCC) passed
@@ -694,7 +717,7 @@ Both possibilities are described in the following.
 	When linking qconf, it will be passed the extra option
 	"-L$(QTDIR)/lib".
 
---- 4.5 When host programs are actually built
+--- 4.6 When host programs are actually built
 
 	Kbuild will only build host-programs when they are referenced
 	as a prerequisite.
@@ -725,7 +748,7 @@ Both possibilities are described in the following.
 	This will tell kbuild to build lxdialog even if not referenced in
 	any rule.
 
---- 4.6 Using hostprogs-$(CONFIG_FOO)
+--- 4.7 Using hostprogs-$(CONFIG_FOO)
 
 	A typical pattern in a Kbuild file looks like this:
 

+ 39 - 0
Documentation/kernel-parameters.txt

@@ -1282,6 +1282,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			[KNL] Should the hard-lockup detector generate
 			backtraces on all cpus.
 			Format: <integer>
+	grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
+			ignore grsecurity's /proc restrictions
+
+	grsec_sysfs_restrict= Format: 0 | 1
+			Default: 1
+			Disables GRKERNSEC_SYSFS_RESTRICT if enabled in config
 
 	hashdist=	[KNL,NUMA] Large hashes allocated during boot
 			are distributed across NUMA nodes.  Defaults on
@@ -2425,6 +2431,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			noexec=on: enable non-executable mappings (default)
 			noexec=off: disable non-executable mappings
 
+	nopcid		[X86-64]
+			Disable PCID (Process-Context IDentifier) even if it
+			is supported by the processor.
+
 	nosmap		[X86]
 			Disable SMAP (Supervisor Mode Access Prevention)
 			even if it is supported by processor.
@@ -2723,6 +2733,35 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			the specified number of seconds.  This is to be used if
 			your oopses keep scrolling off the screen.
 
+	pax_nouderef	[X86] disables UDEREF.  Most likely needed under certain
+			virtualization environments that don't cope well with the
+			expand down segment used by UDEREF on X86-32 or the frequent
+			page table updates on X86-64.
+
+	pax_sanitize_slab=
+			Format: { 0 | 1 | off | fast | full }
+			Options '0' and '1' are only provided for backward
+			compatibility, 'off' or 'fast' should be used instead.
+			0|off : disable slab object sanitization
+			1|fast: enable slab object sanitization excluding
+				whitelisted slabs (default)
+			full  : sanitize all slabs, even the whitelisted ones
+
+	pax_softmode=	0/1 to disable/enable PaX softmode on boot already.
+
+	pax_extra_latent_entropy
+			Enable a very simple form of latent entropy extraction
+			from the first 4GB of memory as the bootmem allocator
+			passes the memory pages to the buddy allocator.
+
+	pax_size_overflow_report_only
+			Enables rate-limited logging of size_overflow plugin
+			violations while disabling killing of the violating
+			task.
+
+	pax_weakuderef	[X86-64] enables the weaker but faster form of UDEREF
+			when the processor supports PCID.
+
 	pcbit=		[HW,ISDN]
 
 	pcd.		[PARIDE]

+ 15 - 0
Documentation/sysctl/kernel.txt

@@ -42,6 +42,7 @@ show up in /proc/sys/kernel:
 - kptr_restrict
 - kstack_depth_to_print       [ X86 only ]
 - l2cr                        [ PPC only ]
+- modify_ldt                  [ X86 only ]
 - modprobe                    ==> Documentation/debugging-modules.txt
 - modules_disabled
 - msg_next_id		      [ sysv ipc ]
@@ -403,6 +404,20 @@ This flag controls the L2 cache of G3 processor boards. If
 
 ==============================================================
 
+modify_ldt: (X86 only)
+
+Enables (1) or disables (0) the modify_ldt syscall. Modifying the LDT
+(Local Descriptor Table) may be needed to run a 16-bit or segmented code
+such as Dosemu or Wine. This is done via a system call which is not needed
+to run portable applications, and which can sometimes be abused to exploit
+some weaknesses of the architecture, opening new vulnerabilities.
+
+This sysctl allows one to increase the system's security by disabling the
+system call, or to restore compatibility with specific applications when it
+was already disabled.
+
+==============================================================
+
 modules_disabled:
 
 A toggle value indicating if modules are allowed to be loaded

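(Editor's note, not part of the commit: the modify_ldt sysctl documented above is normally flipped with "sysctl kernel.modify_ldt=0|1" or by writing to /proc/sys/kernel/modify_ldt. A minimal C sketch of the latter, assuming the kernel exposes that proc entry and the caller runs as root:)

	/* Editor's sketch: write "0" (disable) or "1" (enable) to the
	 * modify_ldt sysctl via procfs. Path assumes CONFIG_MODIFY_LDT_SYSCALL. */
	#include <stdio.h>
	#include <stdlib.h>

	int main(int argc, char **argv)
	{
		const char *path = "/proc/sys/kernel/modify_ldt";
		const char *value = (argc > 1) ? argv[1] : "0";
		FILE *f = fopen(path, "w");

		if (!f) {
			perror(path);
			return EXIT_FAILURE;
		}
		if (fprintf(f, "%s\n", value) < 0 || fclose(f) != 0) {
			perror("write");
			return EXIT_FAILURE;
		}
		return EXIT_SUCCESS;
	}
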
+ 40 - 15
Makefile

@@ -302,7 +302,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
302 302
 HOSTCC       = gcc
303 303
 HOSTCXX      = g++
304 304
 HOSTCFLAGS   = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer -std=gnu89
305
-HOSTCXXFLAGS = -O2
305
+HOSTCFLAGS   = -W -Wno-unused-parameter -Wno-missing-field-initializers -fno-delete-null-pointer-checks
306
+HOSTCFLAGS  += $(call cc-option, -Wno-empty-body)
307
+HOSTCXXFLAGS = -O2 -Wall -W -Wno-array-bounds
306 308
 
307 309
 ifeq ($(shell $(HOSTCC) -v 2>&1 | grep -c "clang version"), 1)
308 310
 HOSTCFLAGS  += -Wno-unused-value -Wno-unused-parameter \
@@ -439,8 +441,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
439 441
 # Rules shared between *config targets and build targets
440 442
 
441 443
 # Basic helpers built in scripts/
442
-PHONY += scripts_basic
443
-scripts_basic:
444
+PHONY += scripts_basic gcc-plugins
445
+scripts_basic: gcc-plugins
444 446
 	$(Q)$(MAKE) $(build)=scripts/basic
445 447
 	$(Q)rm -f .tmp_quiet_recordmcount
446 448
 
@@ -633,6 +635,8 @@ endif
633 635
 # Tell gcc to never replace conditional load with a non-conditional one
634 636
 KBUILD_CFLAGS	+= $(call cc-option,--param=allow-store-data-races=0)
635 637
 
638
+include scripts/Makefile.gcc-plugins
639
+
636 640
 ifdef CONFIG_READABLE_ASM
637 641
 # Disable optimizations that make assembler listings hard to read.
638 642
 # reorder blocks reorders the control in the function
@@ -726,7 +730,7 @@ KBUILD_CFLAGS   += $(call cc-option, -gsplit-dwarf, -g)
726 730
 else
727 731
 KBUILD_CFLAGS	+= -g
728 732
 endif
729
-KBUILD_AFLAGS	+= -Wa,-gdwarf-2
733
+KBUILD_AFLAGS	+= -Wa,--gdwarf-2
730 734
 endif
731 735
 ifdef CONFIG_DEBUG_INFO_DWARF4
732 736
 KBUILD_CFLAGS	+= $(call cc-option, -gdwarf-4,)
@@ -897,7 +901,7 @@ export mod_sign_cmd
897 901
 
898 902
 
899 903
 ifeq ($(KBUILD_EXTMOD),)
900
-core-y		+= kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ block/
904
+core-y		+= kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
901 905
 
902 906
 vmlinux-dirs	:= $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
903 907
 		     $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
@@ -948,6 +952,8 @@ endif
948 952
 
949 953
 # The actual objects are generated when descending,
950 954
 # make sure no implicit rule kicks in
955
+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
956
+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
951 957
 $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
952 958
 
953 959
 # Handle descending into subdirectories listed in $(vmlinux-dirs)
@@ -957,7 +963,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
957 963
 # Error messages still appears in the original language
958 964
 
959 965
 PHONY += $(vmlinux-dirs)
960
-$(vmlinux-dirs): prepare scripts
966
+$(vmlinux-dirs): gcc-plugins prepare scripts
961 967
 	$(Q)$(MAKE) $(build)=$@
962 968
 
963 969
 define filechk_kernel.release
@@ -1000,10 +1006,13 @@ prepare1: prepare2 $(version_h) include/generated/utsrelease.h \
1000 1006
 
1001 1007
 archprepare: archheaders archscripts prepare1 scripts_basic
1002 1008
 
1009
+prepare0: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
1010
+prepare0: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
1003 1011
 prepare0: archprepare FORCE
1004 1012
 	$(Q)$(MAKE) $(build)=.
1005 1013
 
1006 1014
 # All the preparing..
1015
+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
1007 1016
 prepare: prepare0
1008 1017
 
1009 1018
 # Generate some files
@@ -1114,6 +1123,8 @@ all: modules
1114 1123
 # using awk while concatenating to the final file.
1115 1124
 
1116 1125
 PHONY += modules
1126
+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
1127
+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
1117 1128
 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
1118 1129
 	$(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
1119 1130
 	@$(kecho) '  Building modules, stage 2.';
@@ -1129,7 +1140,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
1129 1140
 
1130 1141
 # Target to prepare building external modules
1131 1142
 PHONY += modules_prepare
1132
-modules_prepare: prepare scripts
1143
+modules_prepare: gcc-plugins prepare scripts
1133 1144
 
1134 1145
 # Target to install modules
1135 1146
 PHONY += modules_install
@@ -1195,7 +1206,11 @@ MRPROPER_FILES += .config .config.old .version .old_version \
1195 1206
 		  Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
1196 1207
 		  signing_key.pem signing_key.priv signing_key.x509	\
1197 1208
 		  x509.genkey extra_certificates signing_key.x509.keyid	\
1198
-		  signing_key.x509.signer vmlinux-gdb.py
1209
+		  signing_key.x509.signer vmlinux-gdb.py \
1210
+		  tools/gcc/size_overflow_plugin/size_overflow_hash_aux.h \
1211
+		  tools/gcc/size_overflow_plugin/size_overflow_hash.h \
1212
+		  tools/gcc/size_overflow_plugin/disable_size_overflow_hash.h \
1213
+		  tools/gcc/randomize_layout_seed.h
1199 1214
 
1200 1215
 # clean - Delete most, but leave enough to build external modules
1201 1216
 #
@@ -1234,7 +1249,7 @@ distclean: mrproper
1234 1249
 	@find $(srctree) $(RCS_FIND_IGNORE) \
1235 1250
 		\( -name '*.orig' -o -name '*.rej' -o -name '*~' \
1236 1251
 		-o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
1237
-		-o -name '.*.rej' -o -name '*%'  -o -name 'core' \) \
1252
+		-o -name '.*.rej' -o -name '*.so' -o -name '*%' -o -name 'core' \) \
1238 1253
 		-type f -print | xargs rm -f
1239 1254
 
1240 1255
 
@@ -1401,6 +1416,8 @@ PHONY += $(module-dirs) modules
1401 1416
 $(module-dirs): crmodverdir $(objtree)/Module.symvers
1402 1417
 	$(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
1403 1418
 
1419
+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
1420
+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
1404 1421
 modules: $(module-dirs)
1405 1422
 	@$(kecho) '  Building modules, stage 2.';
1406 1423
 	$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
@@ -1542,17 +1559,21 @@ else
1542 1559
         target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
1543 1560
 endif
1544 1561
 
1545
-%.s: %.c prepare scripts FORCE
1562
+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
1563
+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
1564
+%.s: %.c gcc-plugins prepare scripts FORCE
1546 1565
 	$(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
1547 1566
 %.i: %.c prepare scripts FORCE
1548 1567
 	$(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
1549
-%.o: %.c prepare scripts FORCE
1568
+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
1569
+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
1570
+%.o: %.c gcc-plugins prepare scripts FORCE
1550 1571
 	$(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
1551 1572
 %.lst: %.c prepare scripts FORCE
1552 1573
 	$(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
1553
-%.s: %.S prepare scripts FORCE
1574
+%.s: %.S gcc-plugins prepare scripts FORCE
1554 1575
 	$(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
1555
-%.o: %.S prepare scripts FORCE
1576
+%.o: %.S gcc-plugins prepare scripts FORCE
1556 1577
 	$(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
1557 1578
 %.symtypes: %.c prepare scripts FORCE
1558 1579
 	$(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
@@ -1564,11 +1585,15 @@ endif
1564 1585
 	$(build)=$(build-dir)
1565 1586
 # Make sure the latest headers are built for Documentation
1566 1587
 Documentation/: headers_install
1567
-%/: prepare scripts FORCE
1588
+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
1589
+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
1590
+%/: gcc-plugins prepare scripts FORCE
1568 1591
 	$(cmd_crmodverdir)
1569 1592
 	$(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
1570 1593
 	$(build)=$(build-dir)
1571
-%.ko: prepare scripts FORCE
1594
+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
1595
+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
1596
+%.ko: gcc-plugins prepare scripts FORCE
1572 1597
 	$(cmd_crmodverdir)
1573 1598
 	$(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1)   \
1574 1599
 	$(build)=$(build-dir) $(@:.ko=.o)

+ 10 - 0
arch/alpha/include/asm/atomic.h

@@ -251,4 +251,14 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
 #define atomic_dec(v) atomic_sub(1,(v))
 #define atomic64_dec(v) atomic64_sub(1,(v))
 
+#define atomic64_read_unchecked(v)		atomic64_read(v)
+#define atomic64_set_unchecked(v, i)		atomic64_set((v), (i))
+#define atomic64_add_unchecked(a, v)		atomic64_add((a), (v))
+#define atomic64_add_return_unchecked(a, v)	atomic64_add_return((a), (v))
+#define atomic64_sub_unchecked(a, v)		atomic64_sub((a), (v))
+#define atomic64_inc_unchecked(v)		atomic64_inc(v)
+#define atomic64_inc_return_unchecked(v)	atomic64_inc_return(v)
+#define atomic64_dec_unchecked(v)		atomic64_dec(v)
+#define atomic64_cmpxchg_unchecked(v, o, n)	atomic64_cmpxchg((v), (o), (n))
+
 #endif /* _ALPHA_ATOMIC_H */

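(Editor's note, not part of the commit: the *_unchecked aliases above exist because PaX REFCOUNT makes the regular atomic helpers trap on signed overflow, while counters that may legitimately wrap are expected to use the unchecked variants. A hedged kernel-context sketch of the intended split, assuming the PaX atomic_unchecked_t type and helpers are available:)

	/* Editor's sketch (hypothetical symbols): under CONFIG_PAX_REFCOUNT the
	 * checked atomic_t operations trap on overflow, so a free-running
	 * counter that may wrap must use the _unchecked API instead. */
	#include <linux/atomic.h>

	static atomic_t obj_refcount = ATOMIC_INIT(1);           /* overflow would be a refcount bug: checked */
	static atomic_unchecked_t rx_packets = ATOMIC_INIT(0);   /* statistics counter: allowed to wrap */

	static void obj_get(void)
	{
		atomic_inc(&obj_refcount);           /* traps instead of wrapping past INT_MAX */
	}

	static void note_rx_packet(void)
	{
		atomic_inc_unchecked(&rx_packets);   /* wraps silently, never traps */
	}
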
+ 2 - 2
arch/alpha/include/asm/cache.h

@@ -4,19 +4,19 @@
 #ifndef __ARCH_ALPHA_CACHE_H
 #define __ARCH_ALPHA_CACHE_H
 
+#include <linux/const.h>
 
 /* Bytes per L1 (data) cache line. */
 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
-# define L1_CACHE_BYTES     64
 # define L1_CACHE_SHIFT     6
 #else
 /* Both EV4 and EV5 are write-through, read-allocate,
    direct-mapped, physical.
 */
-# define L1_CACHE_BYTES     32
 # define L1_CACHE_SHIFT     5
 #endif
 
+#define L1_CACHE_BYTES     (_AC(1,UL) << L1_CACHE_SHIFT)
 #define SMP_CACHE_BYTES    L1_CACHE_BYTES
 
 #endif

+ 7 - 0
arch/alpha/include/asm/elf.h

@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
 
 #define ELF_ET_DYN_BASE		(TASK_UNMAPPED_BASE + 0x1000000)
 
+#ifdef CONFIG_PAX_ASLR
+#define PAX_ELF_ET_DYN_BASE	(current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
+
+#define PAX_DELTA_MMAP_LEN	(current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
+#define PAX_DELTA_STACK_LEN	(current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
+#endif
+
 /* $0 is set by ld.so to a pointer to a function which might be 
    registered using atexit.  This provides a mean for the dynamic
    linker to call DT_FINI functions for shared libraries that have

+ 6 - 0
arch/alpha/include/asm/pgalloc.h

@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
 	pgd_set(pgd, pmd);
 }
 
+static inline void
+pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
+{
+	pgd_populate(mm, pgd, pmd);
+}
+
 extern pgd_t *pgd_alloc(struct mm_struct *mm);
 
 static inline void

+ 11 - 0
arch/alpha/include/asm/pgtable.h

@@ -101,6 +101,17 @@ struct vm_area_struct;
 #define PAGE_SHARED	__pgprot(_PAGE_VALID | __ACCESS_BITS)
 #define PAGE_COPY	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
 #define PAGE_READONLY	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
+
+#ifdef CONFIG_PAX_PAGEEXEC
+# define PAGE_SHARED_NOEXEC	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
+# define PAGE_COPY_NOEXEC	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
+# define PAGE_READONLY_NOEXEC	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
+#else
+# define PAGE_SHARED_NOEXEC	PAGE_SHARED
+# define PAGE_COPY_NOEXEC	PAGE_COPY
+# define PAGE_READONLY_NOEXEC	PAGE_READONLY
+#endif
+
 #define PAGE_KERNEL	__pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
 
 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))

+ 1 - 1
arch/alpha/kernel/module.c

@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
 
 	/* The small sections were sorted to the end of the segment.
 	   The following should definitely cover them.  */
-	gp = (u64)me->module_core + me->core_size - 0x8000;
+	gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
 	got = sechdrs[me->arch.gotsecindex].sh_addr;
 
 	for (i = 0; i < n; i++) {

+ 12 - 6
arch/alpha/kernel/osf_sys.c

@@ -1300,10 +1300,11 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
    generic version except that we know how to honor ADDR_LIMIT_32BIT.  */
 
 static unsigned long
-arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
-		         unsigned long limit)
+arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
+		         unsigned long limit, unsigned long flags)
 {
 	struct vm_unmapped_area_info info;
+	unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
 
 	info.flags = 0;
 	info.length = len;
@@ -1311,6 +1312,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
 	info.high_limit = limit;
 	info.align_mask = 0;
 	info.align_offset = 0;
+	info.threadstack_offset = offset;
 	return vm_unmapped_area(&info);
 }
 
@@ -1343,20 +1345,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	   merely specific addresses, but regions of memory -- perhaps
 	   this feature should be incorporated into all ports?  */
 
+#ifdef CONFIG_PAX_RANDMMAP
+	if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
 	if (addr) {
-		addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
+		addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
 		if (addr != (unsigned long) -ENOMEM)
 			return addr;
 	}
 
 	/* Next, try allocating at TASK_UNMAPPED_BASE.  */
-	addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
-					 len, limit);
+	addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
+
 	if (addr != (unsigned long) -ENOMEM)
 		return addr;
 
 	/* Finally, try allocating in low memory.  */
-	addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
+	addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);
 
 	return addr;
 }

+ 140 - 1
arch/alpha/mm/fault.c

@@ -52,6 +52,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
52 52
 	__reload_thread(pcb);
53 53
 }
54 54
 
55
+#ifdef CONFIG_PAX_PAGEEXEC
56
+/*
57
+ * PaX: decide what to do with offenders (regs->pc = fault address)
58
+ *
59
+ * returns 1 when task should be killed
60
+ *         2 when patched PLT trampoline was detected
61
+ *         3 when unpatched PLT trampoline was detected
62
+ */
63
+static int pax_handle_fetch_fault(struct pt_regs *regs)
64
+{
65
+
66
+#ifdef CONFIG_PAX_EMUPLT
67
+	int err;
68
+
69
+	do { /* PaX: patched PLT emulation #1 */
70
+		unsigned int ldah, ldq, jmp;
71
+
72
+		err = get_user(ldah, (unsigned int *)regs->pc);
73
+		err |= get_user(ldq, (unsigned int *)(regs->pc+4));
74
+		err |= get_user(jmp, (unsigned int *)(regs->pc+8));
75
+
76
+		if (err)
77
+			break;
78
+
79
+		if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
80
+		    (ldq & 0xFFFF0000U) == 0xA77B0000U &&
81
+		    jmp == 0x6BFB0000U)
82
+		{
83
+			unsigned long r27, addr;
84
+			unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
85
+			unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
86
+
87
+			addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
88
+			err = get_user(r27, (unsigned long *)addr);
89
+			if (err)
90
+				break;
91
+
92
+			regs->r27 = r27;
93
+			regs->pc = r27;
94
+			return 2;
95
+		}
96
+	} while (0);
97
+
98
+	do { /* PaX: patched PLT emulation #2 */
99
+		unsigned int ldah, lda, br;
100
+
101
+		err = get_user(ldah, (unsigned int *)regs->pc);
102
+		err |= get_user(lda, (unsigned int *)(regs->pc+4));
103
+		err |= get_user(br, (unsigned int *)(regs->pc+8));
104
+
105
+		if (err)
106
+			break;
107
+
108
+		if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
109
+		    (lda & 0xFFFF0000U) == 0xA77B0000U &&
110
+		    (br & 0xFFE00000U) == 0xC3E00000U)
111
+		{
112
+			unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
113
+			unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
114
+			unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
115
+
116
+			regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
117
+			regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
118
+			return 2;
119
+		}
120
+	} while (0);
121
+
122
+	do { /* PaX: unpatched PLT emulation */
123
+		unsigned int br;
124
+
125
+		err = get_user(br, (unsigned int *)regs->pc);
126
+
127
+		if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
128
+			unsigned int br2, ldq, nop, jmp;
129
+			unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
130
+
131
+			addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
132
+			err = get_user(br2, (unsigned int *)addr);
133
+			err |= get_user(ldq, (unsigned int *)(addr+4));
134
+			err |= get_user(nop, (unsigned int *)(addr+8));
135
+			err |= get_user(jmp, (unsigned int *)(addr+12));
136
+			err |= get_user(resolver, (unsigned long *)(addr+16));
137
+
138
+			if (err)
139
+				break;
140
+
141
+			if (br2 == 0xC3600000U &&
142
+			    ldq == 0xA77B000CU &&
143
+			    nop == 0x47FF041FU &&
144
+			    jmp == 0x6B7B0000U)
145
+			{
146
+				regs->r28 = regs->pc+4;
147
+				regs->r27 = addr+16;
148
+				regs->pc = resolver;
149
+				return 3;
150
+			}
151
+		}
152
+	} while (0);
153
+#endif
154
+
155
+	return 1;
156
+}
157
+
158
+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
159
+{
160
+	unsigned long i;
161
+
162
+	printk(KERN_ERR "PAX: bytes at PC: ");
163
+	for (i = 0; i < 5; i++) {
164
+		unsigned int c;
165
+		if (get_user(c, (unsigned int *)pc+i))
166
+			printk(KERN_CONT "???????? ");
167
+		else
168
+			printk(KERN_CONT "%08x ", c);
169
+	}
170
+	printk("\n");
171
+}
172
+#endif
55 173
 
56 174
 /*
57 175
  * This routine handles page faults.  It determines the address,
@@ -132,8 +250,29 @@ retry:
132 250
  good_area:
133 251
 	si_code = SEGV_ACCERR;
134 252
 	if (cause < 0) {
135
-		if (!(vma->vm_flags & VM_EXEC))
253
+		if (!(vma->vm_flags & VM_EXEC)) {
254
+
255
+#ifdef CONFIG_PAX_PAGEEXEC
256
+			if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
257
+				goto bad_area;
258
+
259
+			up_read(&mm->mmap_sem);
260
+			switch (pax_handle_fetch_fault(regs)) {
261
+
262
+#ifdef CONFIG_PAX_EMUPLT
263
+			case 2:
264
+			case 3:
265
+				return;
266
+#endif
267
+
268
+			}
269
+			pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
270
+			do_group_exit(SIGKILL);
271
+#else
136 272
 			goto bad_area;
273
+#endif
274
+
275
+		}
137 276
 	} else if (!cause) {
138 277
 		/* Allow reads even for write-only mappings */
139 278
 		if (!(vma->vm_flags & (VM_READ | VM_WRITE)))

+ 1 - 0
arch/arc/Kconfig

@@ -528,6 +528,7 @@ config ARC_DBG_TLB_MISS_COUNT
 	bool "Profile TLB Misses"
 	default n
 	select DEBUG_FS
+	depends on !GRKERNSEC_KMEM
 	help
 	  Counts number of I and D TLB Misses and exports them via Debugfs
 	  The counters can be cleared via Debugfs as well

+ 3 - 1
arch/arm/Kconfig

@@ -1690,6 +1690,7 @@ config HIGHPTE
 config CPU_SW_DOMAIN_PAN
 	bool "Enable use of CPU domains to implement privileged no-access"
 	depends on MMU && !ARM_LPAE
+	depends on !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
 	default y
 	help
 	  Increase kernel security by ensuring that normal kernel accesses
@@ -1766,7 +1767,7 @@ config ALIGNMENT_TRAP
 
 config UACCESS_WITH_MEMCPY
 	bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
-	depends on MMU
+	depends on MMU && !PAX_MEMORY_UDEREF
 	default y if CPU_FEROCEON
 	help
 	  Implement faster copy_to_user and clear_user methods for CPU
@@ -2001,6 +2002,7 @@ config KEXEC
 	depends on (!SMP || PM_SLEEP_SMP)
 	depends on !CPU_V7M
 	select KEXEC_CORE
+	depends on !GRKERNSEC_KMEM
 	help
 	  kexec is a system call that implements the ability to shutdown your
 	  current kernel, and to start another kernel.  It is like a reboot

+ 1 - 0
arch/arm/Kconfig.debug

@@ -7,6 +7,7 @@ config ARM_PTDUMP
 	depends on DEBUG_KERNEL
 	depends on MMU
 	select DEBUG_FS
+	depends on !GRKERNSEC_KMEM
 	---help---
 	  Say Y here if you want to show the kernel pagetable layout in a
 	  debugfs file. This information is only useful for kernel developers

+ 291 - 32
arch/arm/include/asm/atomic.h

@@ -18,17 +18,41 @@
18 18
 #include <asm/barrier.h>
19 19
 #include <asm/cmpxchg.h>
20 20
 
21
+#ifdef CONFIG_GENERIC_ATOMIC64
22
+#include <asm-generic/atomic64.h>
23
+#endif
24
+
21 25
 #define ATOMIC_INIT(i)	{ (i) }
22 26
 
23 27
 #ifdef __KERNEL__
24 28
 
29
+#ifdef CONFIG_THUMB2_KERNEL
30
+#define REFCOUNT_TRAP_INSN "bkpt	0xf1"
31
+#else
32
+#define REFCOUNT_TRAP_INSN "bkpt	0xf103"
33
+#endif
34
+
35
+#define _ASM_EXTABLE(from, to)		\
36
+"	.pushsection __ex_table,\"a\"\n"\
37
+"	.align	3\n"			\
38
+"	.long	" #from ", " #to"\n"	\
39
+"	.popsection"
40
+
25 41
 /*
26 42
  * On ARM, ordinary assignment (str instruction) doesn't clear the local
27 43
  * strex/ldrex monitor on some implementations. The reason we can use it for
28 44
  * atomic_set() is the clrex or dummy strex done on every exception return.
29 45
  */
30 46
 #define atomic_read(v)	READ_ONCE((v)->counter)
47
+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
48
+{
49
+	return READ_ONCE(v->counter);
50
+}
31 51
 #define atomic_set(v,i)	WRITE_ONCE(((v)->counter), (i))
52
+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
53
+{
54
+	WRITE_ONCE(v->counter, i);
55
+}
32 56
 
33 57
 #if __LINUX_ARM_ARCH__ >= 6
34 58
 
@@ -38,38 +62,64 @@
38 62
  * to ensure that the update happens.
39 63
  */
40 64
 
41
-#define ATOMIC_OP(op, c_op, asm_op)					\
42
-static inline void atomic_##op(int i, atomic_t *v)			\
65
+#ifdef CONFIG_PAX_REFCOUNT
66
+#define __OVERFLOW_POST			\
67
+	"	bvc	3f\n"		\
68
+	"2:	" REFCOUNT_TRAP_INSN "\n"\
69
+	"3:\n"
70
+#define __OVERFLOW_POST_RETURN		\
71
+	"	bvc	3f\n"		\
72
+"	mov	%0, %1\n"		\
73
+	"2:	" REFCOUNT_TRAP_INSN "\n"\
74
+	"3:\n"
75
+#define __OVERFLOW_EXTABLE		\
76
+	"4:\n"				\
77
+	_ASM_EXTABLE(2b, 4b)
78
+#else
79
+#define __OVERFLOW_POST
80
+#define __OVERFLOW_POST_RETURN
81
+#define __OVERFLOW_EXTABLE
82
+#endif
83
+
84
+#define __ATOMIC_OP(op, suffix, c_op, asm_op, post_op, extable)		\
85
+static inline void atomic_##op##suffix(int i, atomic##suffix##_t *v)	\
43 86
 {									\
44 87
 	unsigned long tmp;						\
45 88
 	int result;							\
46 89
 									\
47 90
 	prefetchw(&v->counter);						\
48
-	__asm__ __volatile__("@ atomic_" #op "\n"			\
91
+	__asm__ __volatile__("@ atomic_" #op #suffix "\n"		\
49 92
 "1:	ldrex	%0, [%3]\n"						\
50 93
 "	" #asm_op "	%0, %0, %4\n"					\
94
+	post_op								\
51 95
 "	strex	%1, %0, [%3]\n"						\
52 96
 "	teq	%1, #0\n"						\
53
-"	bne	1b"							\
97
+"	bne	1b\n"							\
98
+	extable								\
54 99
 	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
55 100
 	: "r" (&v->counter), "Ir" (i)					\
56 101
 	: "cc");							\
57 102
 }									\
58 103
 
59
-#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
60
-static inline int atomic_##op##_return_relaxed(int i, atomic_t *v)	\
104
+#define ATOMIC_OP(op, c_op, asm_op) __ATOMIC_OP(op, _unchecked, c_op, asm_op, , )\
105
+				    __ATOMIC_OP(op, , c_op, asm_op##s, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
106
+
107
+#define __ATOMIC_OP_RETURN(op, suffix, c_op, asm_op, post_op, extable)	\
108
+static inline int atomic_##op##_return##suffix##_relaxed(int i, atomic##suffix##_t *v)\
61 109
 {									\
62 110
 	unsigned long tmp;						\
63 111
 	int result;							\
64 112
 									\
65 113
 	prefetchw(&v->counter);						\
66 114
 									\
67
-	__asm__ __volatile__("@ atomic_" #op "_return\n"		\
115
+	__asm__ __volatile__("@ atomic_" #op "_return" #suffix "\n"	\
68 116
 "1:	ldrex	%0, [%3]\n"						\
69 117
 "	" #asm_op "	%0, %0, %4\n"					\
118
+	post_op								\
70 119
 "	strex	%1, %0, [%3]\n"						\
71 120
 "	teq	%1, #0\n"						\
72
-"	bne	1b"							\
121
+"	bne	1b\n"							\
122
+	extable								\
73 123
 	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
74 124
 	: "r" (&v->counter), "Ir" (i)					\
75 125
 	: "cc");							\
@@ -78,8 +128,12 @@ static inline int atomic_##op##_return_relaxed(int i, atomic_t *v)	\
78 128
 }
79 129
 
80 130
 #define atomic_add_return_relaxed	atomic_add_return_relaxed
131
+#define atomic_add_return_unchecked	atomic_add_return_unchecked_relaxed
81 132
 #define atomic_sub_return_relaxed	atomic_sub_return_relaxed
82 133
 
134
+#define ATOMIC_OP_RETURN(op, c_op, asm_op) __ATOMIC_OP_RETURN(op, _unchecked, c_op, asm_op, , )\
135
+					   __ATOMIC_OP_RETURN(op, , c_op, asm_op##s, __OVERFLOW_POST_RETURN, __OVERFLOW_EXTABLE)
136
+
83 137
 static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
84 138
 {
85 139
 	int oldval;
@@ -113,12 +167,24 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
113 167
 	__asm__ __volatile__ ("@ atomic_add_unless\n"
114 168
 "1:	ldrex	%0, [%4]\n"
115 169
 "	teq	%0, %5\n"
116
-"	beq	2f\n"
117
-"	add	%1, %0, %6\n"
170
+"	beq	4f\n"
171
+"	adds	%1, %0, %6\n"
172
+
173
+#ifdef CONFIG_PAX_REFCOUNT
174
+"	bvc	3f\n"
175
+"2:	" REFCOUNT_TRAP_INSN "\n"
176
+"3:\n"
177
+#endif
178
+
118 179
 "	strex	%2, %1, [%4]\n"
119 180
 "	teq	%2, #0\n"
120 181
 "	bne	1b\n"
121
-"2:"
182
+"4:"
183
+
184
+#ifdef CONFIG_PAX_REFCOUNT
185
+	_ASM_EXTABLE(2b, 4b)
186
+#endif
187
+
122 188
 	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
123 189
 	: "r" (&v->counter), "r" (u), "r" (a)
124 190
 	: "cc");
@@ -129,14 +195,36 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
129 195
 	return oldval;
130 196
 }
131 197
 
198
+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
199
+{
200
+	unsigned long oldval, res;
201
+
202
+	smp_mb();
203
+
204
+	do {
205
+		__asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
206
+		"ldrex	%1, [%3]\n"
207
+		"mov	%0, #0\n"
208
+		"teq	%1, %4\n"
209
+		"strexeq %0, %5, [%3]\n"
210
+		    : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
211
+		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
212
+		    : "cc");
213
+	} while (res);
214
+
215
+	smp_mb();
216
+
217
+	return oldval;
218
+}
219
+
132 220
 #else /* ARM_ARCH_6 */
133 221
 
134 222
 #ifdef CONFIG_SMP
135 223
 #error SMP not supported on pre-ARMv6 CPUs
136 224
 #endif
137 225
 
138
-#define ATOMIC_OP(op, c_op, asm_op)					\
139
-static inline void atomic_##op(int i, atomic_t *v)			\
226
+#define __ATOMIC_OP(op, suffix, c_op, asm_op)				\
227
+static inline void atomic_##op##suffix(int i, atomic##suffix##_t *v)	\
140 228
 {									\
141 229
 	unsigned long flags;						\
142 230
 									\
@@ -145,8 +233,11 @@ static inline void atomic_##op(int i, atomic_t *v)			\
145 233
 	raw_local_irq_restore(flags);					\
146 234
 }									\
147 235
 
148
-#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
149
-static inline int atomic_##op##_return(int i, atomic_t *v)		\
236
+#define ATOMIC_OP(op, c_op, asm_op) __ATOMIC_OP(op, , c_op, asm_op)	\
237
+				    __ATOMIC_OP(op, _unchecked, c_op, asm_op)
238
+
239
+#define __ATOMIC_OP_RETURN(op, suffix, c_op, asm_op)			\
240
+static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t *v)\
150 241
 {									\
151 242
 	unsigned long flags;						\
152 243
 	int val;							\
@@ -159,6 +250,9 @@ static inline int atomic_##op##_return(int i, atomic_t *v)		\
159 250
 	return val;							\
160 251
 }
161 252
 
253
+#define ATOMIC_OP_RETURN(op, c_op, asm_op) __ATOMIC_OP_RETURN(op, , c_op, asm_op)\
254
+					   __ATOMIC_OP_RETURN(op, _unchecked, c_op, asm_op)
255
+
162 256
 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
163 257
 {
164 258
 	int ret;
@@ -173,6 +267,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
173 267
 	return ret;
174 268
 }
175 269
 
270
+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
271
+{
272
+	return atomic_cmpxchg((atomic_t *)v, old, new);
273
+}
274
+
176 275
 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
177 276
 {
178 277
 	int c, old;
@@ -201,16 +300,38 @@ ATOMIC_OP(xor, ^=, eor)
201 300
 
202 301
 #undef ATOMIC_OPS
203 302
 #undef ATOMIC_OP_RETURN
303
+#undef __ATOMIC_OP_RETURN
204 304
 #undef ATOMIC_OP
305
+#undef __ATOMIC_OP
205 306
 
206 307
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
308
+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
309
+{
310
+	return xchg_relaxed(&v->counter, new);
311
+}
207 312
 
208 313
 #define atomic_inc(v)		atomic_add(1, v)
314
+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
315
+{
316
+	atomic_add_unchecked(1, v);
317
+}
209 318
 #define atomic_dec(v)		atomic_sub(1, v)
319
+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
320
+{
321
+	atomic_sub_unchecked(1, v);
322
+}
210 323
 
211 324
 #define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
325
+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
326
+{
327
+	return atomic_add_return_unchecked(1, v) == 0;
328
+}
212 329
 #define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
213 330
 #define atomic_inc_return_relaxed(v)    (atomic_add_return_relaxed(1, v))
331
+static inline int atomic_inc_return_unchecked_relaxed(atomic_unchecked_t *v)
332
+{
333
+	return atomic_add_return_unchecked_relaxed(1, v);
334
+}
214 335
 #define atomic_dec_return_relaxed(v)    (atomic_sub_return_relaxed(1, v))
215 336
 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
216 337
 
@@ -221,6 +342,14 @@ typedef struct {
221 342
 	long long counter;
222 343
 } atomic64_t;
223 344
 
345
+#ifdef CONFIG_PAX_REFCOUNT
346
+typedef struct {
347
+	long long counter;
348
+} atomic64_unchecked_t;
349
+#else
350
+typedef atomic64_t atomic64_unchecked_t;
351
+#endif
352
+
224 353
 #define ATOMIC64_INIT(i) { (i) }
225 354
 
226 355
 #ifdef CONFIG_ARM_LPAE
@@ -237,6 +366,19 @@ static inline long long atomic64_read(const atomic64_t *v)
237 366
 	return result;
238 367
 }
239 368
 
369
+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
370
+{
371
+	long long result;
372
+
373
+	__asm__ __volatile__("@ atomic64_read_unchecked\n"
374
+"	ldrd	%0, %H0, [%1]"
375
+	: "=&r" (result)
376
+	: "r" (&v->counter), "Qo" (v->counter)
377
+	);
378
+
379
+	return result;
380
+}
381
+
240 382
 static inline void atomic64_set(atomic64_t *v, long long i)
241 383
 {
242 384
 	__asm__ __volatile__("@ atomic64_set\n"
@@ -245,6 +387,15 @@ static inline void atomic64_set(atomic64_t *v, long long i)
245 387
 	: "r" (&v->counter), "r" (i)
246 388
 	);
247 389
 }
390
+
391
+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
392
+{
393
+	__asm__ __volatile__("@ atomic64_set_unchecked\n"
394
+"	strd	%2, %H2, [%1]"
395
+	: "=Qo" (v->counter)
396
+	: "r" (&v->counter), "r" (i)
397
+	);
398
+}
248 399
 #else
249 400
 static inline long long atomic64_read(const atomic64_t *v)
250 401
 {
@@ -259,6 +410,19 @@ static inline long long atomic64_read(const atomic64_t *v)
259 410
 	return result;
260 411
 }
261 412
 
413
+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
414
+{
415
+	long long result;
416
+
417
+	__asm__ __volatile__("@ atomic64_read_unchecked\n"
418
+"	ldrexd	%0, %H0, [%1]"
419
+	: "=&r" (result)
420
+	: "r" (&v->counter), "Qo" (v->counter)
421
+	);
422
+
423
+	return result;
424
+}
425
+
262 426
 static inline void atomic64_set(atomic64_t *v, long long i)
263 427
 {
264 428
 	long long tmp;
@@ -273,43 +437,73 @@ static inline void atomic64_set(atomic64_t *v, long long i)
273 437
 	: "r" (&v->counter), "r" (i)
274 438
 	: "cc");
275 439
 }
440
+
441
+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
442
+{
443
+	long long tmp;
444
+
445
+	prefetchw(&v->counter);
446
+	__asm__ __volatile__("@ atomic64_set_unchecked\n"
447
+"1:	ldrexd	%0, %H0, [%2]\n"
448
+"	strexd	%0, %3, %H3, [%2]\n"
449
+"	teq	%0, #0\n"
450
+"	bne	1b"
451
+	: "=&r" (tmp), "=Qo" (v->counter)
452
+	: "r" (&v->counter), "r" (i)
453
+	: "cc");
454
+}
276 455
 #endif
277 456
 
278
-#define ATOMIC64_OP(op, op1, op2)					\
279
-static inline void atomic64_##op(long long i, atomic64_t *v)		\
457
+#undef __OVERFLOW_POST_RETURN
458
+#define __OVERFLOW_POST_RETURN		\
459
+	"	bvc	3f\n"		\
460
+"	mov	%0, %1\n"		\
461
+"	mov	%H0, %H1\n"		\
462
+	"2:	" REFCOUNT_TRAP_INSN "\n"\
463
+	"3:\n"
464
+
465
+#define __ATOMIC64_OP(op, suffix, op1, op2, post_op, extable)		\
466
+static inline void atomic64_##op##suffix(long long i, atomic64##suffix##_t *v)\
280 467
 {									\
281 468
 	long long result;						\
282 469
 	unsigned long tmp;						\
283 470
 									\
284 471
 	prefetchw(&v->counter);						\
285
-	__asm__ __volatile__("@ atomic64_" #op "\n"			\
472
+	__asm__ __volatile__("@ atomic64_" #op #suffix "\n"		\
286 473
 "1:	ldrexd	%0, %H0, [%3]\n"					\
287 474
 "	" #op1 " %Q0, %Q0, %Q4\n"					\
288 475
 "	" #op2 " %R0, %R0, %R4\n"					\
476
+	post_op								\
289 477
 "	strexd	%1, %0, %H0, [%3]\n"					\
290 478
 "	teq	%1, #0\n"						\
291
-"	bne	1b"							\
479
+"	bne	1b\n"							\
480
+	extable								\
292 481
 	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
293 482
 	: "r" (&v->counter), "r" (i)					\
294 483
 	: "cc");							\
295 484
 }									\
296 485
 
297
-#define ATOMIC64_OP_RETURN(op, op1, op2)				\
486
+#define ATOMIC64_OP(op, op1, op2) __ATOMIC64_OP(op, _unchecked, op1, op2, , ) \
487
+				  __ATOMIC64_OP(op, , op1, op2##s, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
488
+
489
+#define __ATOMIC64_OP_RETURN(op, suffix, op1, op2, post_op, extable)	\
298 490
 static inline long long							\
299
-atomic64_##op##_return_relaxed(long long i, atomic64_t *v)		\
491
+atomic64_##op##_return##suffix##_relaxed(long long i, atomic64##suffix##_t *v) \
300 492
 {									\
301 493
 	long long result;						\
302 494
 	unsigned long tmp;						\
303 495
 									\
304 496
 	prefetchw(&v->counter);						\
305 497
 									\
306
-	__asm__ __volatile__("@ atomic64_" #op "_return\n"		\
498
+	__asm__ __volatile__("@ atomic64_" #op "_return" #suffix "\n"	\
307 499
 "1:	ldrexd	%0, %H0, [%3]\n"					\
308 500
 "	" #op1 " %Q0, %Q0, %Q4\n"					\
309 501
 "	" #op2 " %R0, %R0, %R4\n"					\
502
+	post_op								\
310 503
 "	strexd	%1, %0, %H0, [%3]\n"					\
311 504
 "	teq	%1, #0\n"						\
312
-"	bne	1b"							\
505
+"	bne	1b\n"							\
506
+	extable								\
313 507
 	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
314 508
 	: "r" (&v->counter), "r" (i)					\
315 509
 	: "cc");							\
@@ -317,6 +511,9 @@ atomic64_##op##_return_relaxed(long long i, atomic64_t *v)		\
317 511
 	return result;							\
318 512
 }
319 513
 
514
+#define ATOMIC64_OP_RETURN(op, op1, op2) __ATOMIC64_OP_RETURN(op, _unchecked, op1, op2, , ) \
515
+					 __ATOMIC64_OP_RETURN(op, , op1, op2##s, __OVERFLOW_POST_RETURN, __OVERFLOW_EXTABLE)
516
+
320 517
 #define ATOMIC64_OPS(op, op1, op2)					\
321 518
 	ATOMIC64_OP(op, op1, op2)					\
322 519
 	ATOMIC64_OP_RETURN(op, op1, op2)
@@ -325,6 +522,7 @@ ATOMIC64_OPS(add, adds, adc)
325 522
 ATOMIC64_OPS(sub, subs, sbc)
326 523
 
327 524
 #define atomic64_add_return_relaxed	atomic64_add_return_relaxed
525
+#define atomic64_add_return_unchecked	atomic64_add_return_unchecked_relaxed
328 526
 #define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
329 527
 
330 528
 #define atomic64_andnot atomic64_andnot
@@ -336,7 +534,12 @@ ATOMIC64_OP(xor, eor, eor)
336 534
 
337 535
 #undef ATOMIC64_OPS
338 536
 #undef ATOMIC64_OP_RETURN
537
+#undef __ATOMIC64_OP_RETURN
339 538
 #undef ATOMIC64_OP
539
+#undef __ATOMIC64_OP
540
+#undef __OVERFLOW_EXTABLE
541
+#undef __OVERFLOW_POST_RETURN
542
+#undef __OVERFLOW_POST
340 543
 
341 544
 static inline long long
342 545
 atomic64_cmpxchg_relaxed(atomic64_t *ptr, long long old, long long new)
@@ -361,6 +564,33 @@ atomic64_cmpxchg_relaxed(atomic64_t *ptr, long long old, long long new)
361 564
 	return oldval;
362 565
 }
363 566
 #define atomic64_cmpxchg_relaxed	atomic64_cmpxchg_relaxed
567
+#define atomic64_cmpxchg_unchecked	atomic64_cmpxchg_unchecked_relaxed
568
+
569
+static inline long long
570
+atomic64_cmpxchg_unchecked_relaxed(atomic64_unchecked_t *ptr, long long old,
571
+					long long new)
572
+{
573
+	long long oldval;
574
+	unsigned long res;
575
+
576
+	smp_mb();
577
+
578
+	do {
579
+		__asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
580
+		"ldrexd		%1, %H1, [%3]\n"
581
+		"mov		%0, #0\n"
582
+		"teq		%1, %4\n"
583
+		"teqeq		%H1, %H4\n"
584
+		"strexdeq	%0, %5, %H5, [%3]"
585
+		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
586
+		: "r" (&ptr->counter), "r" (old), "r" (new)
587
+		: "cc");
588
+	} while (res);
589
+
590
+	smp_mb();
591
+
592
+	return oldval;
593
+}
364 594
 
365 595
 static inline long long atomic64_xchg_relaxed(atomic64_t *ptr, long long new)
366 596
 {
@@ -385,21 +615,35 @@ static inline long long atomic64_xchg_relaxed(atomic64_t *ptr, long long new)
385 615
 static inline long long atomic64_dec_if_positive(atomic64_t *v)
386 616
 {
387 617
 	long long result;
388
-	unsigned long tmp;
618
+	u64 tmp;
389 619
 
390 620
 	smp_mb();
391 621
 	prefetchw(&v->counter);
392 622
 
393 623
 	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
394
-"1:	ldrexd	%0, %H0, [%3]\n"
395
-"	subs	%Q0, %Q0, #1\n"
396
-"	sbc	%R0, %R0, #0\n"
624
+"1:	ldrexd	%1, %H1, [%3]\n"
625
+"	subs	%Q0, %Q1, #1\n"
626
+"	sbcs	%R0, %R1, #0\n"
627
+
628
+#ifdef CONFIG_PAX_REFCOUNT
629
+"	bvc	3f\n"
630
+"	mov	%Q0, %Q1\n"
631
+"	mov	%R0, %R1\n"
632
+"2:	" REFCOUNT_TRAP_INSN "\n"
633
+"3:\n"
634
+#endif
635
+
397 636
 "	teq	%R0, #0\n"
398
-"	bmi	2f\n"
637
+"	bmi	4f\n"
399 638
 "	strexd	%1, %0, %H0, [%3]\n"
400 639
 "	teq	%1, #0\n"
401 640
 "	bne	1b\n"
402
-"2:"
641
+"4:\n"
642
+
643
+#ifdef CONFIG_PAX_REFCOUNT
644
+	_ASM_EXTABLE(2b, 4b)
645
+#endif
646
+
403 647
 	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
404 648
 	: "r" (&v->counter)
405 649
 	: "cc");
@@ -423,13 +667,25 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
423 667
 "	teq	%0, %5\n"
424 668
 "	teqeq	%H0, %H5\n"
425 669
 "	moveq	%1, #0\n"
426
-"	beq	2f\n"
670
+"	beq	4f\n"
427 671
 "	adds	%Q0, %Q0, %Q6\n"
428
-"	adc	%R0, %R0, %R6\n"
672
+"	adcs	%R0, %R0, %R6\n"
673
+
674
+#ifdef CONFIG_PAX_REFCOUNT
675
+"	bvc	3f\n"
676
+"2:	" REFCOUNT_TRAP_INSN "\n"
677
+"3:\n"
678
+#endif
679
+
429 680
 "	strexd	%2, %0, %H0, [%4]\n"
430 681
 "	teq	%2, #0\n"
431 682
 "	bne	1b\n"
432
-"2:"
683
+"4:\n"
684
+
685
+#ifdef CONFIG_PAX_REFCOUNT
686
+	_ASM_EXTABLE(2b, 4b)
687
+#endif
688
+
433 689
 	: "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
434 690
 	: "r" (&v->counter), "r" (u), "r" (a)
435 691
 	: "cc");
@@ -442,10 +698,13 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
442 698
 
443 699
 #define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
444 700
 #define atomic64_inc(v)			atomic64_add(1LL, (v))
701
+#define atomic64_inc_unchecked(v)	atomic64_add_unchecked(1LL, (v))
445 702
 #define atomic64_inc_return_relaxed(v)	atomic64_add_return_relaxed(1LL, (v))
703
+#define atomic64_inc_return_unchecked_relaxed(v)	atomic64_add_return_unchecked_relaxed(1LL, (v))
446 704
 #define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
447 705
 #define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
448 706
 #define atomic64_dec(v)			atomic64_sub(1LL, (v))
707
+#define atomic64_dec_unchecked(v)	atomic64_sub_unchecked(1LL, (v))
449 708
 #define atomic64_dec_return_relaxed(v)	atomic64_sub_return_relaxed(1LL, (v))
450 709
 #define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
451 710
 #define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)

+ 4 - 1
arch/arm/include/asm/cache.h

@@ -4,8 +4,10 @@
4 4
 #ifndef __ASMARM_CACHE_H
5 5
 #define __ASMARM_CACHE_H
6 6
 
7
+#include <linux/const.h>
8
+
7 9
 #define L1_CACHE_SHIFT		CONFIG_ARM_L1_CACHE_SHIFT
8
-#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
10
+#define L1_CACHE_BYTES		(_AC(1,UL) << L1_CACHE_SHIFT)
9 11
 
10 12
 /*
11 13
  * Memory returned by kmalloc() may be used for DMA, so we must make
@@ -24,5 +26,6 @@
24 26
 #endif
25 27
 
26 28
 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
29
+#define __read_only __attribute__ ((__section__(".data..read_only")))
27 30
 
28 31
 #endif
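
Switching L1_CACHE_BYTES to _AC(1,UL) makes the constant usable from both C (where it expands to 1UL, so cache arithmetic is done in unsigned long) and assembly (where a UL suffix would be a syntax error). A small demonstration of what the _AC() helper from <linux/const.h> does, with the macros re-declared locally so the snippet is self-contained:

#include <stdio.h>

/* local copy of the <linux/const.h> mechanism, for the demo only */
#ifdef __ASSEMBLY__
#define _AC(X, Y)	X
#else
#define __AC(X, Y)	(X##Y)
#define _AC(X, Y)	__AC(X, Y)
#endif

#define L1_CACHE_SHIFT	6
#define L1_CACHE_BYTES	(_AC(1,UL) << L1_CACHE_SHIFT)

int main(void)
{
	/* sizeof shows the constant is now an unsigned long, not an int */
	printf("L1_CACHE_BYTES = %lu, sizeof = %zu\n",
	       (unsigned long)L1_CACHE_BYTES, sizeof(L1_CACHE_BYTES));
	return 0;
}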

+ 1 - 1
arch/arm/include/asm/cacheflush.h

@@ -116,7 +116,7 @@ struct cpu_cache_fns {
116 116
 	void (*dma_unmap_area)(const void *, size_t, int);
117 117
 
118 118
 	void (*dma_flush_range)(const void *, const void *);
119
-};
119
+} __no_const;
120 120
 
121 121
 /*
122 122
  * Select the calling method
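
cpu_cache_fns is one of several ops structures tagged __no_const in this patch. Under grsecurity's constify GCC plugin, structures made up of function pointers are turned const and end up in read-only memory so the pointers cannot be re-targeted after boot; __no_const is the opt-out for ops that the kernel genuinely rewrites at runtime, as the cache functions are during CPU setup. A plain-C picture of the trade-off (the attribute itself comes from the plugin and is not reproduced here):

#include <stdio.h>

struct ops { void (*flush)(void); };

static void real_flush(void) { puts("flush"); }

/* constified: lives in .rodata, cannot be re-pointed later */
static const struct ops fixed_ops = { .flush = real_flush };

/* the __no_const case: must stay writable, it is filled in at runtime */
static struct ops runtime_ops;

int main(void)
{
	runtime_ops.flush = real_flush;	/* allowed */
	/* fixed_ops.flush = real_flush;	would not compile */
	fixed_ops.flush();
	runtime_ops.flush();
	return 0;
}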

+ 13 - 1
arch/arm/include/asm/checksum.h

@@ -37,7 +37,19 @@ __wsum
37 37
 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
38 38
 
39 39
 __wsum
40
-csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
40
+__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
41
+
42
+static inline __wsum
43
+csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr)
44
+{
45
+	__wsum ret;
46
+	pax_open_userland();
47
+	ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
48
+	pax_close_userland();
49
+	return ret;
50
+}
51
+
52
+
41 53
 
42 54
 /*
43 55
  * 	Fold a partial checksum without adding pseudo headers
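
The rename keeps the fast assembly path as __csum_partial_copy_from_user and wraps it so that, under CONFIG_PAX_MEMORY_UDEREF, userland is only reachable for the duration of the copy. The same open/do/close bracket shows up again in fncpy.h and tls.h below (there for kernel memory rather than userland). A schematic of the idiom, with the pax_*() calls reduced to stubs:

#include <stdio.h>
#include <string.h>

/* stubs: in the patch these flip the DOMAIN_USER field of the DACR */
static void pax_open_userland(void)  { puts("userland accessible"); }
static void pax_close_userland(void) { puts("userland blocked again"); }

/* stand-in for the renamed low-level helper */
static unsigned int __raw_copy(void *dst, const void *src, int len)
{
	memcpy(dst, src, len);
	return len;
}

static unsigned int wrapped_copy(void *dst, const void *src, int len)
{
	unsigned int ret;

	pax_open_userland();
	ret = __raw_copy(dst, src, len);
	pax_close_userland();
	return ret;
}

int main(void)
{
	char in[8] = "abcdefg", out[8];

	return wrapped_copy(out, in, sizeof(in)) != sizeof(in);
}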

+ 4 - 0
arch/arm/include/asm/cmpxchg.h

@@ -117,6 +117,10 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
117 117
 	(__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr),		\
118 118
 				   sizeof(*(ptr)));			\
119 119
 })
120
+#define xchg_unchecked(ptr, x) ({					\
121
+	(__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr),		\
122
+				   sizeof(*(ptr)));			\
123
+})
120 124
 
121 125
 #include <asm-generic/cmpxchg-local.h>
122 126
 

+ 1 - 1
arch/arm/include/asm/cpuidle.h

@@ -32,7 +32,7 @@ struct device_node;
32 32
 struct cpuidle_ops {
33 33
 	int (*suspend)(int cpu, unsigned long arg);
34 34
 	int (*init)(struct device_node *, int cpu);
35
-};
35
+} __no_const;
36 36
 
37 37
 struct of_cpuidle_method {
38 38
 	const char *method;

+ 38 - 4
arch/arm/include/asm/domain.h

@@ -42,7 +42,6 @@
42 42
 #define DOMAIN_USER	1
43 43
 #define DOMAIN_IO	0
44 44
 #endif
45
-#define DOMAIN_VECTORS	3
46 45
 
47 46
 /*
48 47
  * Domain types
@@ -51,10 +50,28 @@
51 50
 #define DOMAIN_CLIENT	1
52 51
 #ifdef CONFIG_CPU_USE_DOMAINS
53 52
 #define DOMAIN_MANAGER	3
53
+#define DOMAIN_VECTORS	3
54
+#else
55
+
56
+#ifdef CONFIG_PAX_KERNEXEC
57
+#define DOMAIN_MANAGER	1
58
+#define DOMAIN_KERNEXEC	3
54 59
 #else
55 60
 #define DOMAIN_MANAGER	1
56 61
 #endif
57 62
 
63
+#ifdef CONFIG_PAX_MEMORY_UDEREF
64
+#define DOMAIN_USERCLIENT	0
65
+#define DOMAIN_UDEREF		1
66
+#define DOMAIN_VECTORS		DOMAIN_KERNEL
67
+#else
68
+#define DOMAIN_USERCLIENT	1
69
+#define DOMAIN_VECTORS		DOMAIN_USER
70
+#endif
71
+
72
+#endif
73
+#define DOMAIN_KERNELCLIENT	1
74
+
58 75
 #define domain_mask(dom)	((3) << (2 * (dom)))
59 76
 #define domain_val(dom,type)	((type) << (2 * (dom)))
60 77
 
@@ -62,13 +79,19 @@
62 79
 #define DACR_INIT \
63 80
 	(domain_val(DOMAIN_USER, DOMAIN_NOACCESS) | \
64 81
 	 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
65
-	 domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
82
+	 domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT) | \
66 83
 	 domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
84
+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
85
+	/* DOMAIN_VECTORS is defined to DOMAIN_KERNEL */
86
+#define DACR_INIT \
87
+	(domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
88
+	 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
89
+	 domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT))
67 90
 #else
68 91
 #define DACR_INIT \
69
-	(domain_val(DOMAIN_USER, DOMAIN_CLIENT) | \
92
+	(domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
70 93
 	 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
71
-	 domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
94
+	 domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT) | \
72 95
 	 domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
73 96
 #endif
74 97
 
@@ -113,6 +136,17 @@ static inline void set_domain(unsigned val)
113 136
 		set_domain(domain);				\
114 137
 	} while (0)
115 138
 
139
+#elif defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
140
+#define modify_domain(dom,type)					\
141
+	do {							\
142
+		struct thread_info *thread = current_thread_info(); \
143
+		unsigned int domain = get_domain();		\
144
+		domain &= ~domain_mask(dom);			\
145
+		domain = domain | domain_val(dom, type);	\
146
+		thread->cpu_domain = domain;			\
147
+		set_domain(domain);				\
148
+	} while (0)
149
+
116 150
 #else
117 151
 static inline void modify_domain(unsigned dom, unsigned type)	{ }
118 152
 #endif
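
Each ARM protection domain is a two-bit field in the DACR, which is all that domain_mask()/domain_val() encode. A worked computation of the UDEREF flavour of DACR_INIT and of the modify_domain() step performed by pax_open_userland(), using the access types from this hunk and the usual !CONFIG_IO_36 domain numbering (KERNEL=0, USER=1, IO=2):

#include <stdio.h>

#define DOMAIN_KERNEL		0
#define DOMAIN_USER		1
#define DOMAIN_IO		2

#define DOMAIN_MANAGER		3
#define DOMAIN_USERCLIENT	0	/* UDEREF: user pages start unreachable */
#define DOMAIN_UDEREF		1
#define DOMAIN_KERNELCLIENT	1

#define domain_mask(dom)	((3) << (2 * (dom)))
#define domain_val(dom, type)	((type) << (2 * (dom)))

int main(void)
{
	unsigned int dacr = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) |
			    domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) |
			    domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT);

	printf("DACR_INIT (UDEREF)      = 0x%08x\n", dacr);

	/* what modify_domain(DOMAIN_USER, DOMAIN_UDEREF) does to that value */
	dacr = (dacr & ~domain_mask(DOMAIN_USER)) |
	       domain_val(DOMAIN_USER, DOMAIN_UDEREF);
	printf("after pax_open_userland = 0x%08x\n", dacr);
	return 0;
}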

+ 8 - 1
arch/arm/include/asm/elf.h

@@ -117,7 +117,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
117 117
    the loader.  We need to make sure that it is out of the way of the program
118 118
    that it will "exec", and that there is sufficient room for the brk.  */
119 119
 
120
-#define ELF_ET_DYN_BASE	(TASK_SIZE / 3 * 2)
120
+#define ELF_ET_DYN_BASE		(TASK_SIZE / 3 * 2)
121
+
122
+#ifdef CONFIG_PAX_ASLR
123
+#define PAX_ELF_ET_DYN_BASE	0x00008000UL
124
+
125
+#define PAX_DELTA_MMAP_LEN	((current->personality == PER_LINUX_32BIT) ? 16 : 10)
126
+#define PAX_DELTA_STACK_LEN	((current->personality == PER_LINUX_32BIT) ? 16 : 10)
127
+#endif
121 128
 
122 129
 /* When the program starts, a1 contains a pointer to a function to be 
123 130
    registered with atexit, as per the SVR4 ABI.  A value of 0 means we 
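
PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN give the number of random bits PaX mixes into the mmap and stack bases, so a PER_LINUX_32BIT personality gets 16 bits and everything else 10. Assuming the delta is applied in page-sized steps, as PaX does on other architectures, that works out to the address spans below; a quick arithmetic check:

#include <stdio.h>

int main(void)
{
	const unsigned int page_shift = 12;		/* 4 KiB pages assumed */
	const unsigned int lens[] = { 10, 16 };

	for (unsigned int i = 0; i < 2; i++) {
		unsigned long span = (1UL << lens[i]) << page_shift;

		printf("%2u random bits -> base varies over %4lu MiB\n",
		       lens[i], span >> 20);
	}
	return 0;
}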

+ 2 - 0
arch/arm/include/asm/fncpy.h

@@ -81,7 +81,9 @@
81 81
 	BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) ||		\
82 82
 		(__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1)));	\
83 83
 									\
84
+	pax_open_kernel();						\
84 85
 	memcpy(dest_buf, (void const *)(__funcp_address & ~1), size);	\
86
+	pax_close_kernel();						\
85 87
 	flush_icache_range((unsigned long)(dest_buf),			\
86 88
 		(unsigned long)(dest_buf) + (size));			\
87 89
 									\

+ 1 - 0
arch/arm/include/asm/futex.h

@@ -107,6 +107,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
107 107
 		return -EFAULT;
108 108
 
109 109
 	preempt_disable();
110
+
110 111
 	__ua_flags = uaccess_save_and_enable();
111 112
 	__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
112 113
 	"1:	" TUSER(ldr) "	%1, [%4]\n"

+ 1 - 1
arch/arm/include/asm/kmap_types.h

@@ -4,6 +4,6 @@
4 4
 /*
5 5
  * This is the "bare minimum".  AIO seems to require this.
6 6
  */
7
-#define KM_TYPE_NR 16
7
+#define KM_TYPE_NR 17
8 8
 
9 9
 #endif

+ 1 - 1
arch/arm/include/asm/mach/dma.h

@@ -22,7 +22,7 @@ struct dma_ops {
22 22
 	int	(*residue)(unsigned int, dma_t *);		/* optional */
23 23
 	int	(*setspeed)(unsigned int, dma_t *, int);	/* optional */
24 24
 	const char *type;
25
-};
25
+} __do_const;
26 26
 
27 27
 struct dma_struct {
28 28
 	void		*addr;		/* single DMA address		*/

+ 9 - 7
arch/arm/include/asm/mach/map.h

@@ -23,17 +23,19 @@ struct map_desc {
23 23
 
24 24
 /* types 0-3 are defined in asm/io.h */
25 25
 enum {
26
-	MT_UNCACHED = 4,
27
-	MT_CACHECLEAN,
28
-	MT_MINICLEAN,
26
+	MT_UNCACHED_RW = 4,
27
+	MT_CACHECLEAN_RO,
28
+	MT_MINICLEAN_RO,
29 29
 	MT_LOW_VECTORS,
30 30
 	MT_HIGH_VECTORS,
31
-	MT_MEMORY_RWX,
31
+	__MT_MEMORY_RWX,
32 32
 	MT_MEMORY_RW,
33
-	MT_ROM,
34
-	MT_MEMORY_RWX_NONCACHED,
33
+	MT_MEMORY_RX,
34
+	MT_ROM_RX,
35
+	MT_MEMORY_RW_NONCACHED,
36
+	MT_MEMORY_RX_NONCACHED,
35 37
 	MT_MEMORY_RW_DTCM,
36
-	MT_MEMORY_RWX_ITCM,
38
+	MT_MEMORY_RX_ITCM,
37 39
 	MT_MEMORY_RW_SO,
38 40
 	MT_MEMORY_DMA_READY,
39 41
 };

+ 1 - 1
arch/arm/include/asm/outercache.h

@@ -39,7 +39,7 @@ struct outer_cache_fns {
39 39
 	/* This is an ARM L2C thing */
40 40
 	void (*write_sec)(unsigned long, unsigned);
41 41
 	void (*configure)(const struct l2x0_regs *);
42
-};
42
+} __no_const;
43 43
 
44 44
 extern struct outer_cache_fns outer_cache;
45 45
 

+ 2 - 1
arch/arm/include/asm/page.h

@@ -23,6 +23,7 @@
23 23
 
24 24
 #else
25 25
 
26
+#include <linux/compiler.h>
26 27
 #include <asm/glue.h>
27 28
 
28 29
 /*
@@ -114,7 +115,7 @@ struct cpu_user_fns {
114 115
 	void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
115 116
 	void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
116 117
 			unsigned long vaddr, struct vm_area_struct *vma);
117
-};
118
+} __no_const;
118 119
 
119 120
 #ifdef MULTI_USER
120 121
 extern struct cpu_user_fns cpu_user;

+ 20 - 0
arch/arm/include/asm/pgalloc.h

@@ -17,6 +17,7 @@
17 17
 #include <asm/processor.h>
18 18
 #include <asm/cacheflush.h>
19 19
 #include <asm/tlbflush.h>
20
+#include <asm/system_info.h>
20 21
 
21 22
 #define check_pgt_cache()		do { } while (0)
22 23
 
@@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
43 44
 	set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
44 45
 }
45 46
 
47
+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
48
+{
49
+	pud_populate(mm, pud, pmd);
50
+}
51
+
46 52
 #else	/* !CONFIG_ARM_LPAE */
47 53
 
48 54
 /*
@@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
51 57
 #define pmd_alloc_one(mm,addr)		({ BUG(); ((pmd_t *)2); })
52 58
 #define pmd_free(mm, pmd)		do { } while (0)
53 59
 #define pud_populate(mm,pmd,pte)	BUG()
60
+#define pud_populate_kernel(mm,pmd,pte)	BUG()
54 61
 
55 62
 #endif	/* CONFIG_ARM_LPAE */
56 63
 
@@ -128,6 +135,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
128 135
 	__free_page(pte);
129 136
 }
130 137
 
138
+static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot)
139
+{
140
+#ifdef CONFIG_ARM_LPAE
141
+	pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
142
+#else
143
+	if (addr & SECTION_SIZE)
144
+		pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot);
145
+	else
146
+		pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
147
+#endif
148
+	flush_pmd_entry(pmdp);
149
+}
150
+
131 151
 static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
132 152
 				  pmdval_t prot)
133 153
 {
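
__section_update() ORs extra protection bits into a section mapping and flushes the entry. On classic 2-level tables a Linux pmd covers two 1 MiB hardware sections kept in pmdp[0] and pmdp[1], so bit 20 of the address (SECTION_SIZE) picks which half of the pair to touch; under LPAE there is only one entry. A stripped-down sketch of that selection, with pmdval_t reduced to unsigned long and the cache maintenance left out:

#include <stdio.h>

#define SECTION_SHIFT	20
#define SECTION_SIZE	(1UL << SECTION_SHIFT)

typedef unsigned long pmdval_t;

static void section_update(pmdval_t *pmdp, unsigned long addr, pmdval_t prot)
{
	if (addr & SECTION_SIZE)	/* odd 1 MiB section of the pair */
		pmdp[1] |= prot;
	else				/* even section */
		pmdp[0] |= prot;
	/* real code: flush_pmd_entry(pmdp); */
}

int main(void)
{
	pmdval_t pair[2] = { 0x11000, 0x12000 };	/* fake section entries */

	section_update(pair, 0x00100000, 1UL << 4);	/* a PMD_SECT_XN-style bit */
	printf("pmd[0]=%#lx pmd[1]=%#lx\n", pair[0], pair[1]);
	return 0;
}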

+ 3 - 1
arch/arm/include/asm/pgtable-2level-hwdef.h

@@ -28,7 +28,7 @@
28 28
 /*
29 29
  *   - section
30 30
  */
31
-#define PMD_SECT_PXN    (_AT(pmdval_t, 1) << 0)     /* v7 */
31
+#define PMD_SECT_PXN		(_AT(pmdval_t, 1) << 0)     /* v7 */
32 32
 #define PMD_SECT_BUFFERABLE	(_AT(pmdval_t, 1) << 2)
33 33
 #define PMD_SECT_CACHEABLE	(_AT(pmdval_t, 1) << 3)
34 34
 #define PMD_SECT_XN		(_AT(pmdval_t, 1) << 4)		/* v6 */
@@ -40,6 +40,7 @@
40 40
 #define PMD_SECT_nG		(_AT(pmdval_t, 1) << 17)	/* v6 */
41 41
 #define PMD_SECT_SUPER		(_AT(pmdval_t, 1) << 18)	/* v6 */
42 42
 #define PMD_SECT_AF		(_AT(pmdval_t, 0))
43
+#define PMD_SECT_RDONLY		(_AT(pmdval_t, 0))
43 44
 
44 45
 #define PMD_SECT_UNCACHED	(_AT(pmdval_t, 0))
45 46
 #define PMD_SECT_BUFFERED	(PMD_SECT_BUFFERABLE)
@@ -69,6 +70,7 @@
69 70
  *   - extended small page/tiny page
70 71
  */
71 72
 #define PTE_EXT_XN		(_AT(pteval_t, 1) << 0)		/* v6 */
73
+#define PTE_EXT_PXN		(_AT(pteval_t, 1) << 2)		/* v7 */
72 74
 #define PTE_EXT_AP_MASK		(_AT(pteval_t, 3) << 4)
73 75
 #define PTE_EXT_AP0		(_AT(pteval_t, 1) << 4)
74 76
 #define PTE_EXT_AP1		(_AT(pteval_t, 2) << 4)

+ 3 - 0
arch/arm/include/asm/pgtable-2level.h

@@ -127,6 +127,9 @@
127 127
 #define L_PTE_SHARED		(_AT(pteval_t, 1) << 10)	/* shared(v6), coherent(xsc3) */
128 128
 #define L_PTE_NONE		(_AT(pteval_t, 1) << 11)
129 129
 
130
+/* Two-level page tables only have PXN in the PGD, not in the PTE. */
131
+#define L_PTE_PXN		(_AT(pteval_t, 0))
132
+
130 133
 /*
131 134
  * These are the memory types, defined to be compatible with
132 135
  * pre-ARMv6 CPUs cacheable and bufferable bits: n/a,n/a,C,B

+ 3 - 0
arch/arm/include/asm/pgtable-3level.h

@@ -80,6 +80,7 @@
80 80
 #define L_PTE_USER		(_AT(pteval_t, 1) << 6)		/* AP[1] */
81 81
 #define L_PTE_SHARED		(_AT(pteval_t, 3) << 8)		/* SH[1:0], inner shareable */
82 82
 #define L_PTE_YOUNG		(_AT(pteval_t, 1) << 10)	/* AF */
83
+#define L_PTE_PXN		(_AT(pteval_t, 1) << 53)	/* PXN */
83 84
 #define L_PTE_XN		(_AT(pteval_t, 1) << 54)	/* XN */
84 85
 #define L_PTE_DIRTY		(_AT(pteval_t, 1) << 55)
85 86
 #define L_PTE_SPECIAL		(_AT(pteval_t, 1) << 56)
@@ -91,10 +92,12 @@
91 92
 #define L_PMD_SECT_SPLITTING	(_AT(pmdval_t, 1) << 56)
92 93
 #define L_PMD_SECT_NONE		(_AT(pmdval_t, 1) << 57)
93 94
 #define L_PMD_SECT_RDONLY	(_AT(pteval_t, 1) << 58)
95
+#define PMD_SECT_RDONLY		PMD_SECT_AP2
94 96
 
95 97
 /*
96 98
  * To be used in assembly code with the upper page attributes.
97 99
  */
100
+#define L_PTE_PXN_HIGH		(1 << (53 - 32))
98 101
 #define L_PTE_XN_HIGH		(1 << (54 - 32))
99 102
 #define L_PTE_DIRTY_HIGH	(1 << (55 - 32))
100 103
 

+ 51 - 3
arch/arm/include/asm/pgtable.h

@@ -33,6 +33,9 @@
33 33
 #include <asm/pgtable-2level.h>
34 34
 #endif
35 35
 
36
+#define ktla_ktva(addr)		(addr)
37
+#define ktva_ktla(addr)		(addr)
38
+
36 39
 /*
37 40
  * Just any arbitrary offset to the start of the vmalloc VM area: the
38 41
  * current 8MB value just means that there will be a 8MB "hole" after the
@@ -48,6 +51,9 @@
48 51
 #define LIBRARY_TEXT_START	0x0c000000
49 52
 
50 53
 #ifndef __ASSEMBLY__
54
+extern pteval_t __supported_pte_mask;
55
+extern pmdval_t __supported_pmd_mask;
56
+
51 57
 extern void __pte_error(const char *file, int line, pte_t);
52 58
 extern void __pmd_error(const char *file, int line, pmd_t);
53 59
 extern void __pgd_error(const char *file, int line, pgd_t);
@@ -56,6 +62,48 @@ extern void __pgd_error(const char *file, int line, pgd_t);
56 62
 #define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd)
57 63
 #define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd)
58 64
 
65
+#define  __HAVE_ARCH_PAX_OPEN_KERNEL
66
+#define  __HAVE_ARCH_PAX_CLOSE_KERNEL
67
+
68
+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
69
+#include <asm/domain.h>
70
+#include <linux/thread_info.h>
71
+#include <linux/preempt.h>
72
+
73
+static inline int test_domain(int domain, int domaintype)
74
+{
75
+	return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype);
76
+}
77
+#endif
78
+
79
+#ifdef CONFIG_PAX_KERNEXEC
80
+static inline unsigned long pax_open_kernel(void) {
81
+#ifdef CONFIG_ARM_LPAE
82
+	/* TODO */
83
+#else
84
+	preempt_disable();
85
+	BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC));
86
+	modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC);
87
+#endif
88
+	return 0;
89
+}
90
+
91
+static inline unsigned long pax_close_kernel(void) {
92
+#ifdef CONFIG_ARM_LPAE
93
+	/* TODO */
94
+#else
95
+	BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER));
96
+	/* DOMAIN_MANAGER = "client" under KERNEXEC */
97
+	modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
98
+	preempt_enable_no_resched();
99
+#endif
100
+	return 0;
101
+}
102
+#else
103
+static inline unsigned long pax_open_kernel(void) { return 0; }
104
+static inline unsigned long pax_close_kernel(void) { return 0; }
105
+#endif
106
+
59 107
 /*
60 108
  * This is the lowest virtual address we can permit any user space
61 109
  * mapping to be mapped at.  This is particularly important for
@@ -75,8 +123,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
75 123
 /*
76 124
  * The pgprot_* and protection_map entries will be fixed up in runtime
77 125
  * to include the cachable and bufferable bits based on memory policy,
78
- * as well as any architecture dependent bits like global/ASID and SMP
79
- * shared mapping bits.
126
+ * as well as any architecture dependent bits like global/ASID, PXN,
127
+ * and SMP shared mapping bits.
80 128
  */
81 129
 #define _L_PTE_DEFAULT	L_PTE_PRESENT | L_PTE_YOUNG
82 130
 
@@ -306,7 +354,7 @@ static inline pte_t pte_mknexec(pte_t pte)
306 354
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
307 355
 {
308 356
 	const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
309
-		L_PTE_NONE | L_PTE_VALID;
357
+		L_PTE_NONE | L_PTE_VALID | __supported_pte_mask;
310 358
 	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
311 359
 	return pte;
312 360
 }
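
pax_open_kernel()/pax_close_kernel() give KERNEXEC a short, preemption-disabled window in which otherwise read-only kernel data may be written, by temporarily granting DOMAIN_KERNEL manager access; the fncpy.h and tls.h hunks above use exactly this bracket around their single write. A schematic of the calling pattern only, with the domain switching replaced by a flag (the real functions are the ones shown in this hunk):

#include <assert.h>
#include <stdio.h>

static int kernel_writable;

static unsigned long pax_open_kernel(void)
{
	assert(!kernel_writable);	/* roughly mirrors the BUG_ON() above */
	kernel_writable = 1;		/* real code: preempt_disable() + DACR */
	return 0;
}

static unsigned long pax_close_kernel(void)
{
	assert(kernel_writable);
	kernel_writable = 0;		/* real code: DACR + preempt_enable() */
	return 0;
}

/* stand-in for a normally read-only target such as the vectors-page TLS word */
static unsigned int protected_word;

static void set_tls_like(unsigned int val)
{
	pax_open_kernel();
	protected_word = val;		/* the one permitted write */
	pax_close_kernel();
}

int main(void)
{
	set_tls_like(0xdeadbeef);
	printf("wrote %#x\n", protected_word);
	return 0;
}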

+ 1 - 1
arch/arm/include/asm/smp.h

@@ -108,7 +108,7 @@ struct smp_operations {
108 108
 	int  (*cpu_disable)(unsigned int cpu);
109 109
 #endif
110 110
 #endif
111
-};
111
+} __no_const;
112 112
 
113 113
 struct of_cpu_method {
114 114
 	const char *method;

+ 9 - 1
arch/arm/include/asm/thread_info.h

@@ -73,6 +73,9 @@ struct thread_info {
73 73
 	.flags		= 0,						\
74 74
 	.preempt_count	= INIT_PREEMPT_COUNT,				\
75 75
 	.addr_limit	= KERNEL_DS,					\
76
+	.cpu_domain	= domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) |	\
77
+			  domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \
78
+			  domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT),	\
76 79
 }
77 80
 
78 81
 #define init_thread_info	(init_thread_union.thread_info)
@@ -143,6 +146,10 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
143 146
 #define TIF_SYSCALL_AUDIT	5	/* syscall auditing active */
144 147
 #define TIF_SYSCALL_TRACEPOINT	6	/* syscall tracepoint instrumentation */
145 148
 #define TIF_SECCOMP		7	/* seccomp syscall filtering active */
149
+/* within 8 bits of TIF_SYSCALL_TRACE
150
+ *  to meet flexible second operand requirements
151
+ */
152
+#define TIF_GRSEC_SETXID	8
146 153
 
147 154
 #define TIF_NOHZ		12	/* in adaptive nohz mode */
148 155
 #define TIF_USING_IWMMXT	17
@@ -158,10 +165,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
158 165
 #define _TIF_SYSCALL_TRACEPOINT	(1 << TIF_SYSCALL_TRACEPOINT)
159 166
 #define _TIF_SECCOMP		(1 << TIF_SECCOMP)
160 167
 #define _TIF_USING_IWMMXT	(1 << TIF_USING_IWMMXT)
168
+#define _TIF_GRSEC_SETXID	(1 << TIF_GRSEC_SETXID)
161 169
 
162 170
 /* Checks for any syscall work in entry-common.S */
163 171
 #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
164
-			   _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
172
+			   _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID)
165 173
 
166 174
 /*
167 175
  * Change these and you break ASM code in entry-common.S
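
TIF_GRSEC_SETXID is deliberately squeezed in below bit 8: entry-common.S tests _TIF_SYSCALL_WORK with tst-style instructions whose immediate must be an ARM flexible second operand, i.e. an 8-bit value rotated right by an even amount, so all the syscall-work bits have to share one 8-bit window. A small checker for that encoding rule, applied to the mask as it stands after this change (bits 4 through 8, taking TIF_SYSCALL_TRACE as bit 4):

#include <stdio.h>

/* is x encodable as an ARM data-processing immediate (imm8 ror 2n)? */
static int arm_immediate_ok(unsigned int x)
{
	for (int rot = 0; rot < 32; rot += 2) {
		unsigned int v = (x << rot) | (rot ? x >> (32 - rot) : 0);

		if (v <= 0xff)
			return 1;
	}
	return 0;
}

int main(void)
{
	/* TRACE, AUDIT, TRACEPOINT, SECCOMP, GRSEC_SETXID */
	unsigned int work = (1u << 4) | (1u << 5) | (1u << 6) |
			    (1u << 7) | (1u << 8);

	printf("_TIF_SYSCALL_WORK = %#x, valid ARM immediate: %s\n",
	       work, arm_immediate_ok(work) ? "yes" : "no");
	printf("with a far-away bit (17) added instead: %s\n",
	       arm_immediate_ok(work | (1u << 17)) ? "yes" : "no");
	return 0;
}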

+ 3 - 0
arch/arm/include/asm/tls.h

@@ -3,6 +3,7 @@
3 3
 
4 4
 #include <linux/compiler.h>
5 5
 #include <asm/thread_info.h>
6
+#include <asm/pgtable.h>
6 7
 
7 8
 #ifdef __ASSEMBLY__
8 9
 #include <asm/asm-offsets.h>
@@ -89,7 +90,9 @@ static inline void set_tls(unsigned long val)
89 90
 			 * at 0xffff0fe0 must be used instead.  (see
90 91
 			 * entry-armv.S for details)
91 92
 			 */
93
+			pax_open_kernel();
92 94
 			*((unsigned int *)0xffff0ff0) = val;
95
+			pax_close_kernel();
93 96
 #endif
94 97
 		}
95 98
 

+ 79 - 34
arch/arm/include/asm/uaccess.h

@@ -18,6 +18,7 @@
18 18
 #include <asm/domain.h>
19 19
 #include <asm/unified.h>
20 20
 #include <asm/compiler.h>
21
+#include <asm/pgtable.h>
21 22
 
22 23
 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
23 24
 #include <asm-generic/uaccess-unaligned.h>
@@ -50,6 +51,59 @@ struct exception_table_entry
50 51
 extern int fixup_exception(struct pt_regs *regs);
51 52
 
52 53
 /*
54
+ * These two are intentionally not defined anywhere - if the kernel
55
+ * code generates any references to them, that's a bug.
56
+ */
57
+extern int __get_user_bad(void);
58
+extern int __put_user_bad(void);
59
+
60
+/*
61
+ * Note that this is actually 0x1,0000,0000
62
+ */
63
+#define KERNEL_DS	0x00000000
64
+#define get_ds()	(KERNEL_DS)
65
+
66
+#ifdef CONFIG_MMU
67
+
68
+#define USER_DS		TASK_SIZE
69
+#define get_fs()	(current_thread_info()->addr_limit)
70
+
71
+static inline void set_fs(mm_segment_t fs)
72
+{
73
+	current_thread_info()->addr_limit = fs;
74
+	modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER);
75
+}
76
+
77
+#define segment_eq(a, b)	((a) == (b))
78
+
79
+#define __HAVE_ARCH_PAX_OPEN_USERLAND
80
+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
81
+
82
+static inline void pax_open_userland(void)
83
+{
84
+
85
+#ifdef CONFIG_PAX_MEMORY_UDEREF
86
+	if (segment_eq(get_fs(), USER_DS)) {
87
+		BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF));
88
+		modify_domain(DOMAIN_USER, DOMAIN_UDEREF);
89
+	}
90
+#endif
91
+
92
+}
93
+
94
+static inline void pax_close_userland(void)
95
+{
96
+
97
+#ifdef CONFIG_PAX_MEMORY_UDEREF
98
+	if (segment_eq(get_fs(), USER_DS)) {
99
+		BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS));
100
+		modify_domain(DOMAIN_USER, DOMAIN_NOACCESS);
101
+	}
102
+#endif
103
+
104
+}
105
+
106
+/*
53 107
  * These two functions allow hooking accesses to userspace to increase
54 108
  * system integrity by ensuring that the kernel can not inadvertantly
55 109
  * perform such accesses (eg, via list poison values) which could then
@@ -66,6 +120,7 @@ static inline unsigned int uaccess_save_and_enable(void)
66 120
 
67 121
 	return old_domain;
68 122
 #else
123
+	pax_open_userland();
69 124
 	return 0;
70 125
 #endif
71 126
 }
@@ -75,35 +130,11 @@ static inline void uaccess_restore(unsigned int flags)
75 130
 #ifdef CONFIG_CPU_SW_DOMAIN_PAN
76 131
 	/* Restore the user access mask */
77 132
 	set_domain(flags);
133
+#else
134
+	pax_close_userland();
78 135
 #endif
79 136
 }
80 137
 
81
-/*
82
- * These two are intentionally not defined anywhere - if the kernel
83
- * code generates any references to them, that's a bug.
84
- */
85
-extern int __get_user_bad(void);
86
-extern int __put_user_bad(void);
87
-
88
-/*
89
- * Note that this is actually 0x1,0000,0000
90
- */
91
-#define KERNEL_DS	0x00000000
92
-#define get_ds()	(KERNEL_DS)
93
-
94
-#ifdef CONFIG_MMU
95
-
96
-#define USER_DS		TASK_SIZE
97
-#define get_fs()	(current_thread_info()->addr_limit)
98
-
99
-static inline void set_fs(mm_segment_t fs)
100
-{
101
-	current_thread_info()->addr_limit = fs;
102
-	modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
103
-}
104
-
105
-#define segment_eq(a, b)	((a) == (b))
106
-
107 138
 #define __addr_ok(addr) ({ \
108 139
 	unsigned long flag; \
109 140
 	__asm__("cmp %2, %0; movlo %0, #0" \
@@ -302,6 +333,7 @@ static inline void set_fs(mm_segment_t fs)
302 333
 
303 334
 #endif /* CONFIG_MMU */
304 335
 
336
+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
305 337
 #define access_ok(type, addr, size)	(__range_ok(addr, size) == 0)
306 338
 
307 339
 #define user_addr_max() \
@@ -490,39 +522,46 @@ do {									\
490 522
 
491 523
 
492 524
 #ifdef CONFIG_MMU
493
-extern unsigned long __must_check
525
+extern unsigned long __must_check __size_overflow(3)
494 526
 arm_copy_from_user(void *to, const void __user *from, unsigned long n);
495 527
 
496
-static inline unsigned long __must_check
528
+static inline unsigned long __must_check __size_overflow(3)
497 529
 __copy_from_user(void *to, const void __user *from, unsigned long n)
498 530
 {
499
-	unsigned int __ua_flags = uaccess_save_and_enable();
531
+	unsigned int __ua_flags;
532
+
533
+	check_object_size(to, n, false);
534
+	__ua_flags = uaccess_save_and_enable();
500 535
 	n = arm_copy_from_user(to, from, n);
501 536
 	uaccess_restore(__ua_flags);
502 537
 	return n;
503 538
 }
504 539
 
505
-extern unsigned long __must_check
540
+extern unsigned long __must_check __size_overflow(3)
506 541
 arm_copy_to_user(void __user *to, const void *from, unsigned long n);
507
-extern unsigned long __must_check
542
+extern unsigned long __must_check __size_overflow(3)
508 543
 __copy_to_user_std(void __user *to, const void *from, unsigned long n);
509 544
 
510 545
 static inline unsigned long __must_check
511 546
 __copy_to_user(void __user *to, const void *from, unsigned long n)
512 547
 {
513 548
 #ifndef CONFIG_UACCESS_WITH_MEMCPY
514
-	unsigned int __ua_flags = uaccess_save_and_enable();
549
+	unsigned int __ua_flags;
550
+
551
+	check_object_size(from, n, true);
552
+	__ua_flags = uaccess_save_and_enable();
515 553
 	n = arm_copy_to_user(to, from, n);
516 554
 	uaccess_restore(__ua_flags);
517 555
 	return n;
518 556
 #else
557
+	check_object_size(from, n, true);
519 558
 	return arm_copy_to_user(to, from, n);
520 559
 #endif
521 560
 }
522 561
 
523
-extern unsigned long __must_check
562
+extern unsigned long __must_check __size_overflow(2)
524 563
 arm_clear_user(void __user *addr, unsigned long n);
525
-extern unsigned long __must_check
564
+extern unsigned long __must_check __size_overflow(2)
526 565
 __clear_user_std(void __user *addr, unsigned long n);
527 566
 
528 567
 static inline unsigned long __must_check
@@ -542,6 +581,9 @@ __clear_user(void __user *addr, unsigned long n)
542 581
 
543 582
 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
544 583
 {
584
+	if ((long)n < 0)
585
+		return n;
586
+
545 587
 	if (access_ok(VERIFY_READ, from, n))
546 588
 		n = __copy_from_user(to, from, n);
547 589
 	else /* security hole - plug it */
@@ -551,6 +593,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
551 593
 
552 594
 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
553 595
 {
596
+	if ((long)n < 0)
597
+		return n;
598
+
554 599
 	if (access_ok(VERIFY_WRITE, to, n))
555 600
 		n = __copy_to_user(to, from, n);
556 601
 	return n;
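
Two independent checks are layered onto the user-copy paths here: check_object_size() (the hardened-usercopy bounds check) runs before the access window is opened, and the new "(long)n < 0" test rejects lengths whose size calculation underflowed into a huge unsigned value before any copying or zero-filling happens. A userspace model of the length guard; the helpers below are stand-ins, not the kernel routines:

#include <stdio.h>
#include <string.h>

static int access_ok(const void *addr, unsigned long n)
{
	(void)addr; (void)n;
	return 1;			/* stub */
}

static unsigned long __copy_from_user(void *to, const void *from, unsigned long n)
{
	memcpy(to, from, n);
	return 0;			/* 0 bytes left uncopied */
}

static unsigned long copy_from_user(void *to, const void *from, unsigned long n)
{
	if ((long)n < 0)		/* underflowed length: refuse outright */
		return n;

	if (access_ok(from, n))
		n = __copy_from_user(to, from, n);
	else
		memset(to, 0, n);	/* "security hole - plug it" */
	return n;
}

int main(void)
{
	char src[16] = "payload", dst[16];
	unsigned long bad_len = 4ul - 8ul;	/* wraps to a huge value */

	printf("good copy: %lu bytes not copied\n", copy_from_user(dst, src, 7));
	printf("bad  copy: %lu bytes not copied\n", copy_from_user(dst, src, bad_len));
	return 0;
}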

+ 1 - 1
arch/arm/include/uapi/asm/ptrace.h

@@ -92,7 +92,7 @@
92 92
  * ARMv7 groups of PSR bits
93 93
  */
94 94
 #define APSR_MASK	0xf80f0000	/* N, Z, C, V, Q and GE flags */
95
-#define PSR_ISET_MASK	0x01000010	/* ISA state (J, T) mask */
95
+#define PSR_ISET_MASK	0x01000020	/* ISA state (J, T) mask */
96 96
 #define PSR_IT_MASK	0x0600fc00	/* If-Then execution state mask */
97 97
 #define PSR_ENDIAN_MASK	0x00000200	/* Endianness state mask */
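
The PSR_ISET_MASK change reads as a straight bug fix: the instruction-set state of the CPSR lives in the J bit (bit 24) and the T bit (bit 5), so the mask should be 0x01000020; the old value 0x01000010 covered bit 4, which belongs to the mode field. A two-line check of the arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int J = 1u << 24, T = 1u << 5;

	printf("PSR_ISET_MASK should be %#010x (old value %#010x hit M[4])\n",
	       J | T, J | (1u << 4));
	return 0;
}
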
98 98
 

+ 1 - 1
arch/arm/kernel/armksyms.c

@@ -58,7 +58,7 @@ EXPORT_SYMBOL(arm_delay_ops);
58 58
 
59 59
 	/* networking */
60 60
 EXPORT_SYMBOL(csum_partial);
61
-EXPORT_SYMBOL(csum_partial_copy_from_user);
61
+EXPORT_SYMBOL(__csum_partial_copy_from_user);
62 62
 EXPORT_SYMBOL(csum_partial_copy_nocheck);
63 63
 EXPORT_SYMBOL(__csum_ipv6_magic);
64 64
 

+ 1 - 1
arch/arm/kernel/cpuidle.c

@@ -19,7 +19,7 @@ extern struct of_cpuidle_method __cpuidle_method_of_table[];
19 19
 static const struct of_cpuidle_method __cpuidle_method_of_table_sentinel
20 20
 	__used __section(__cpuidle_method_of_table_end);
21 21
 
22
-static struct cpuidle_ops cpuidle_ops[NR_CPUS];
22
+static struct cpuidle_ops cpuidle_ops[NR_CPUS] __read_only;
23 23
 
24 24
 /**
25 25
  * arm_cpuidle_simple_enter() - a wrapper to cpu_do_idle()

+ 106 - 3
arch/arm/kernel/entry-armv.S

@@ -50,6 +50,87 @@
50 50
 9997:
51 51
 	.endm
52 52
 
53
+	.macro	pax_enter_kernel
54
+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
55
+	@ make aligned space for saved DACR
56
+	sub	sp, sp, #8
57
+	@ save regs
58
+	stmdb	sp!, {r1, r2}
59
+	@ read DACR from cpu_domain into r1
60
+	mov	r2, sp
61
+	@ assume 8K pages, since we have to split the immediate in two
62
+	bic	r2, r2, #(0x1fc0)
63
+	bic	r2, r2, #(0x3f)
64
+	ldr	r1, [r2, #TI_CPU_DOMAIN]
65
+	@ store old DACR on stack
66
+	str	r1, [sp, #8]
67
+#ifdef CONFIG_PAX_KERNEXEC
68
+	@ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
69
+	bic	r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
70
+	orr	r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
71
+#endif
72
+#ifdef CONFIG_PAX_MEMORY_UDEREF
73
+	@ set current DOMAIN_USER to DOMAIN_NOACCESS
74
+	bic	r1, r1, #(domain_val(DOMAIN_USER, 3))
75
+#endif
76
+	@ write r1 to current_thread_info()->cpu_domain
77
+	str	r1, [r2, #TI_CPU_DOMAIN]
78
+	@ write r1 to DACR
79
+	mcr	p15, 0, r1, c3, c0, 0
80
+	@ instruction sync
81
+	instr_sync
82
+	@ restore regs
83
+	ldmia	sp!, {r1, r2}
84
+#endif
85
+	.endm
86
+
87
+	.macro	pax_open_userland
88
+#ifdef CONFIG_PAX_MEMORY_UDEREF
89
+	@ save regs
90
+	stmdb	sp!, {r0, r1}
91
+	@ read DACR from cpu_domain into r1
92
+	mov	r0, sp
93
+	@ assume 8K pages, since we have to split the immediate in two
94
+	bic	r0, r0, #(0x1fc0)
95
+	bic	r0, r0, #(0x3f)
96
+	ldr	r1, [r0, #TI_CPU_DOMAIN]
97
+	@ set current DOMAIN_USER to DOMAIN_CLIENT
98
+	bic	r1, r1, #(domain_val(DOMAIN_USER, 3))
99
+	orr	r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
100
+	@ write r1 to current_thread_info()->cpu_domain
101
+	str	r1, [r0, #TI_CPU_DOMAIN]
102
+	@ write r1 to DACR
103
+	mcr	p15, 0, r1, c3, c0, 0
104
+	@ instruction sync
105
+	instr_sync
106
+	@ restore regs
107
+	ldmia	sp!, {r0, r1}
108
+#endif
109
+	.endm
110
+
111
+	.macro	pax_close_userland
112
+#ifdef CONFIG_PAX_MEMORY_UDEREF
113
+	@ save regs
114
+	stmdb	sp!, {r0, r1}
115
+	@ read DACR from cpu_domain into r1
116
+	mov	r0, sp
117
+	@ assume 8K pages, since we have to split the immediate in two
118
+	bic	r0, r0, #(0x1fc0)
119
+	bic	r0, r0, #(0x3f)
120
+	ldr	r1, [r0, #TI_CPU_DOMAIN]
121
+	@ set current DOMAIN_USER to DOMAIN_NOACCESS
122
+	bic	r1, r1, #(domain_val(DOMAIN_USER, 3))
123
+	@ write r1 to current_thread_info()->cpu_domain
124
+	str	r1, [r0, #TI_CPU_DOMAIN]
125
+	@ write r1 to DACR
126
+	mcr	p15, 0, r1, c3, c0, 0
127
+	@ instruction sync
128
+	instr_sync
129
+	@ restore regs
130
+	ldmia	sp!, {r0, r1}
131
+#endif
132
+	.endm
133
+
53 134
 	.macro	pabt_helper
54 135
 	@ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
55 136
 #ifdef MULTI_PABORT
@@ -92,11 +173,15 @@
92 173
  * Invalid mode handlers
93 174
  */
94 175
 	.macro	inv_entry, reason
176
+
177
+	pax_enter_kernel
178
+
95 179
 	sub	sp, sp, #S_FRAME_SIZE
96 180
  ARM(	stmib	sp, {r1 - lr}		)
97 181
  THUMB(	stmia	sp, {r0 - r12}		)
98 182
  THUMB(	str	sp, [sp, #S_SP]		)
99 183
  THUMB(	str	lr, [sp, #S_LR]		)
184
+
100 185
 	mov	r1, #\reason
101 186
 	.endm
102 187
 
@@ -152,6 +237,9 @@ ENDPROC(__und_invalid)
152 237
 	.macro	svc_entry, stack_hole=0, trace=1, uaccess=1
153 238
  UNWIND(.fnstart		)
154 239
  UNWIND(.save {r0 - pc}		)
240
+
241
+	pax_enter_kernel
242
+
155 243
 	sub	sp, sp, #(S_FRAME_SIZE + 8 + \stack_hole - 4)
156 244
 #ifdef CONFIG_THUMB2_KERNEL
157 245
  SPFIX(	str	r0, [sp]	)	@ temporarily saved
@@ -167,7 +255,12 @@ ENDPROC(__und_invalid)
167 255
 	ldmia	r0, {r3 - r5}
168 256
 	add	r7, sp, #S_SP - 4	@ here for interlock avoidance
169 257
 	mov	r6, #-1			@  ""  ""      ""       ""
258
+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
259
+	@ offset sp by 8 as done in pax_enter_kernel
260
+	add	r2, sp, #(S_FRAME_SIZE + 8 + \stack_hole + 4)
261
+#else
170 262
 	add	r2, sp, #(S_FRAME_SIZE + 8 + \stack_hole - 4)
263
+#endif
171 264
  SPFIX(	addeq	r2, r2, #4	)
172 265
 	str	r3, [sp, #-4]!		@ save the "real" r0 copied
173 266
 					@ from the exception stack
@@ -376,6 +469,9 @@ ENDPROC(__fiq_abt)
376 469
 	.macro	usr_entry, trace=1, uaccess=1
377 470
  UNWIND(.fnstart	)
378 471
  UNWIND(.cantunwind	)	@ don't unwind the user space
472
+
473
+	pax_enter_kernel_user
474
+
379 475
 	sub	sp, sp, #S_FRAME_SIZE
380 476
  ARM(	stmib	sp, {r1 - r12}	)
381 477
  THUMB(	stmia	sp, {r0 - r12}	)
@@ -489,7 +585,9 @@ __und_usr:
489 585
 	tst	r3, #PSR_T_BIT			@ Thumb mode?
490 586
 	bne	__und_usr_thumb
491 587
 	sub	r4, r2, #4			@ ARM instr at LR - 4
588
+	pax_open_userland
492 589
 1:	ldrt	r0, [r4]
590
+	pax_close_userland
493 591
  ARM_BE8(rev	r0, r0)				@ little endian instruction
494 592
 
495 593
 	uaccess_disable ip
@@ -525,11 +623,15 @@ __und_usr_thumb:
525 623
  */
526 624
 	.arch	armv6t2
527 625
 #endif
626
+	pax_open_userland
528 627
 2:	ldrht	r5, [r4]
628
+	pax_close_userland
529 629
 ARM_BE8(rev16	r5, r5)				@ little endian instruction
530 630
 	cmp	r5, #0xe800			@ 32bit instruction if xx != 0
531 631
 	blo	__und_usr_fault_16_pan		@ 16bit undefined instruction
632
+	pax_open_userland
532 633
 3:	ldrht	r0, [r2]
634
+	pax_close_userland
533 635
 ARM_BE8(rev16	r0, r0)				@ little endian instruction
534 636
 	uaccess_disable ip
535 637
 	add	r2, r2, #2			@ r2 is PC + 2, make it PC + 4
@@ -560,7 +662,8 @@ ENDPROC(__und_usr)
560 662
  */
561 663
 	.pushsection .text.fixup, "ax"
562 664
 	.align	2
563
-4:	str     r4, [sp, #S_PC]			@ retry current instruction
665
+4:	pax_close_userland
666
+	str     r4, [sp, #S_PC]			@ retry current instruction
564 667
 	ret	r9
565 668
 	.popsection
566 669
 	.pushsection __ex_table,"a"
@@ -782,7 +885,7 @@ ENTRY(__switch_to)
782 885
  THUMB(	str	lr, [ip], #4		   )
783 886
 	ldr	r4, [r2, #TI_TP_VALUE]
784 887
 	ldr	r5, [r2, #TI_TP_VALUE + 4]
785
-#ifdef CONFIG_CPU_USE_DOMAINS
888
+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
786 889
 	mrc	p15, 0, r6, c3, c0, 0		@ Get domain register
787 890
 	str	r6, [r1, #TI_CPU_DOMAIN]	@ Save old domain register
788 891
 	ldr	r6, [r2, #TI_CPU_DOMAIN]
@@ -793,7 +896,7 @@ ENTRY(__switch_to)
793 896
 	ldr	r8, =__stack_chk_guard
794 897
 	ldr	r7, [r7, #TSK_STACK_CANARY]
795 898
 #endif
796
-#ifdef CONFIG_CPU_USE_DOMAINS
899
+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
797 900
 	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
798 901
 #endif
799 902
 	mov	r5, r0
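
The pax_enter_kernel / pax_open_userland / pax_close_userland macros above all locate current_thread_info() the same way the rest of the ARM entry code does: struct thread_info sits at the bottom of the 8 KiB kernel stack, so clearing the low 13 bits of sp yields its address, and the mask is split into two bic instructions (0x1fc0, then 0x3f) only because 0x1fff is not encodable as a single ARM immediate. The same trick in C, with a fake stack standing in for the real one:

#include <stdint.h>
#include <stdio.h>

#define THREAD_SIZE	8192u

struct thread_info { unsigned int cpu_domain; };

/* simulate an 8 KiB kernel stack with thread_info at its base */
static union {
	struct thread_info ti;
	unsigned char stack[THREAD_SIZE];
} task __attribute__((aligned(THREAD_SIZE)));

int main(void)
{
	/* pretend sp points somewhere in the middle of the stack */
	uintptr_t sp = (uintptr_t)&task.stack[5000];

	/* two-step mask mirroring the split immediate in the assembly */
	uintptr_t ti = (sp & ~(uintptr_t)0x1fc0) & ~(uintptr_t)0x3f;

	printf("thread_info found at stack base: %s\n",
	       ti == (uintptr_t)&task.ti ? "yes" : "no");
	return 0;
}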

+ 43 - 5
arch/arm/kernel/entry-common.S

@@ -11,18 +11,46 @@
11 11
 #include <asm/assembler.h>
12 12
 #include <asm/unistd.h>
13 13
 #include <asm/ftrace.h>
14
+#include <asm/domain.h>
14 15
 #include <asm/unwind.h>
15 16
 
17
+#include "entry-header.S"
18
+
16 19
 #ifdef CONFIG_NEED_RET_TO_USER
17 20
 #include <mach/entry-macro.S>
18 21
 #else
19 22
 	.macro  arch_ret_to_user, tmp1, tmp2
23
+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
24
+	@ save regs
25
+	stmdb	sp!, {r1, r2}
26
+	@ read DACR from cpu_domain into r1
27
+	mov     r2, sp
28
+	@ assume 8K pages, since we have to split the immediate in two
29
+	bic     r2, r2, #(0x1fc0)
30
+	bic     r2, r2, #(0x3f)
31
+	ldr     r1, [r2, #TI_CPU_DOMAIN]
32
+#ifdef CONFIG_PAX_KERNEXEC
33
+	@ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
34
+	bic     r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
35
+	orr     r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
36
+#endif
37
+#ifdef CONFIG_PAX_MEMORY_UDEREF
38
+	@ set current DOMAIN_USER to DOMAIN_UDEREF
39
+	bic     r1, r1, #(domain_val(DOMAIN_USER, 3))
40
+	orr     r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
41
+#endif
42
+	@ write r1 to current_thread_info()->cpu_domain
43
+	str     r1, [r2, #TI_CPU_DOMAIN]
44
+	@ write r1 to DACR
45
+	mcr     p15, 0, r1, c3, c0, 0
46
+	@ instruction sync
47
+	instr_sync
48
+	@ restore regs
49
+	ldmia	sp!, {r1, r2}
50
+#endif
20 51
 	.endm
21 52
 #endif
22 53
 
23
-#include "entry-header.S"
24
-
25