(empty log message)
@@ -0,0 +1,255 @@ | ||
1 | +/* | |
2 | + * check.c | |
3 | + * | |
4 | + * # echo 'noptrace-objs := check.o probe.o' > noptrace/Makefile | |
5 | + * # echo 'obj-m += noptrace.o' >> noptrace/Makefile | |
6 | + * # make -s SUBDIRS=$PWD/noptrace modules | |
7 | + * # make -s SUBDIRS=$PWD/noptrace modules_install | |
8 | + */ | |
9 | +#include <linux/module.h> | |
10 | +#include <linux/init.h> | |
11 | +#include <linux/security.h> | |
12 | +#include <linux/version.h> | |
13 | +#include "probe.h" | |
14 | + | |
15 | +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0) | |
16 | +static inline _Bool my_check_task(void) | |
17 | +{ | |
18 | + return uid_gte(current_uid(), KUIDT_INIT(1000)); | |
19 | +} | |
20 | +#else | |
21 | +#ifndef current_uid | |
22 | +#define current_uid() (current->uid) | |
23 | +#endif | |
24 | +static inline _Bool my_check_task(void) | |
25 | +{ | |
26 | + return current_uid() >= 1000; | |
27 | +} | |
28 | +#endif | |
29 | + | |
30 | +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) | |
31 | + | |
32 | +static int my_ptrace_access_check(struct task_struct *child, unsigned int mode) | |
33 | +{ | |
34 | + return my_check_task() ? -EPERM : 0; | |
35 | +} | |
36 | + | |
37 | +static int my_ptrace_traceme(struct task_struct *parent) | |
38 | +{ | |
39 | + return my_check_task() ? -EPERM : 0; | |
40 | +} | |
41 | + | |
/*
 * Build one security_hook_list entry.  .head initially points into
 * probe_dummy_security_hook_heads (from probe.h); noptrace_init()
 * later rebases it onto the real security_hook_heads, preserving the
 * member offset.
 */
#define MY_HOOK_INIT(HEAD, HOOK) \
	{ .head = &probe_dummy_security_hook_heads.HEAD, \
	  .hook = { .HEAD = HOOK } }

/* The two ptrace-related LSM hooks this module installs. */
static struct security_hook_list my_hooks[] = {
	MY_HOOK_INIT(ptrace_traceme, my_ptrace_traceme),
	MY_HOOK_INIT(ptrace_access_check, my_ptrace_access_check)
};
50 | + | |
/*
 * Append one hook to its list head.  Kernel 4.17 changed
 * security_hook_heads members from list_head to hlist_head, hence the
 * two RCU-safe insertion variants.
 */
static inline void add_hook(struct security_hook_list *hook)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0)
	hlist_add_tail_rcu(&hook->list, hook->head);
#else
	list_add_tail_rcu(&hook->list, hook->head);
#endif
}
59 | + | |
/*
 * With CONFIG_STRICT_KERNEL_RWX (and without writable LSM hooks) the
 * hook heads may live in read-only memory; verify — and on x86
 * temporarily lift — the write protection before list insertion.
 */
#if defined(CONFIG_STRICT_KERNEL_RWX) && !defined(CONFIG_SECURITY_WRITABLE_HOOKS)
#include <linux/uaccess.h> /* probe_kernel_write() */
#define NEED_TO_CHECK_HOOKS_ARE_WRITABLE

#if defined(CONFIG_X86)
/* Write-protected page-table entries found during the scan; init-only
 * scratch storage, discarded after module init. */
#define MAX_RO_PAGES 1024
static struct page *ro_pages[MAX_RO_PAGES] __initdata;
static unsigned int ro_pages_len __initdata;
/*
 * Test whether @addr is writable; if not, queue its mapping for
 * temporary un-protection.
 *
 * NOTE(review): lookup_address() returns a pte_t *, which is stored in
 * a struct page * here; test_bit(_PAGE_BIT_RW, &page->flags) thus
 * inspects the RW bit of the raw PTE word, not an actual page->flags
 * field.  This appears deliberate (the RW bit position matches the PTE
 * layout on x86) — confirm before changing.
 *
 * Returns true if the mapping is writable, already queued, or newly
 * queued in ro_pages[]; false if @addr is unmapped or ro_pages[] is
 * full.
 */
static bool __init lsm_test_page_ro(void *addr)
{
	unsigned int i;
	int unused;
	struct page *page;

	page = (struct page *) lookup_address((unsigned long) addr, &unused);
	if (!page)
		return false;
	if (test_bit(_PAGE_BIT_RW, &(page->flags)))
		return true;
	/* Already queued for temporary un-protection? */
	for (i = 0; i < ro_pages_len; i++)
		if (page == ro_pages[i])
			return true;
	if (ro_pages_len == MAX_RO_PAGES)
		return false;
	ro_pages[ro_pages_len++] = page;
	return true;
}
88 | + | |
/*
 * x86: verify that every memory location touched by inserting my_hooks
 * is either writable now or can be made writable (queued in ro_pages[]).
 *
 * probe_kernel_write() of a word onto itself is a pure writability
 * probe: if it succeeds nothing is protected and no scan is needed.
 *
 * Returns true when hook insertion will be possible, false otherwise.
 */
static bool __init check_ro_pages(struct security_hook_heads *hooks)
{
	int i;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0)
	struct hlist_head *list = &hooks->capable;

	if (!probe_kernel_write(list, list, sizeof(void *)))
		return true;
	/* Check each head pointer plus every existing node that tail
	 * insertion will relink. */
	for (i = 0; i < ARRAY_SIZE(my_hooks); i++) {
		struct hlist_head *head = my_hooks[i].head;
		struct security_hook_list *shp;

		if (!lsm_test_page_ro(&head->first))
			return false;
		hlist_for_each_entry(shp, head, list)
			if (!lsm_test_page_ro(&shp->list.next) ||
			    !lsm_test_page_ro(&shp->list.pprev))
				return false;
	}
#else
	struct list_head *list = &hooks->capable;

	if (!probe_kernel_write(list, list, sizeof(void *)))
		return true;
	/* Pre-4.17 doubly-linked list_head variant of the same scan. */
	for (i = 0; i < ARRAY_SIZE(my_hooks); i++) {
		struct list_head *head = my_hooks[i].head;
		struct security_hook_list *shp;

		if (!lsm_test_page_ro(&head->next) ||
		    !lsm_test_page_ro(&head->prev))
			return false;
		list_for_each_entry(shp, head, list)
			if (!lsm_test_page_ro(&shp->list.next) ||
			    !lsm_test_page_ro(&shp->list.prev))
				return false;
	}
#endif
	return true;
}
128 | +#else | |
/*
 * Non-x86 fallback: individual pages cannot be enumerated/unprotected
 * here, so simply report whether the hook heads are directly writable.
 * Writing a location's own contents back onto itself is a no-op used
 * purely as a writability probe.
 */
static bool __init check_ro_pages(struct security_hook_heads *hooks)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0)
	struct hlist_head *list = &hooks->capable;
#else
	struct list_head *list = &hooks->capable;
#endif

	return !probe_kernel_write(list, list, sizeof(void *));
}
139 | +#endif | |
140 | +#endif | |
141 | + | |
/*
 * Modern (>= 4.2) initialisation: locate the real security_hook_heads
 * via the probe helper, rebase my_hooks[].head from the dummy layout
 * onto it, then append our hooks — temporarily lifting x86 write
 * protection when the heads live in read-only memory.
 *
 * Returns 0 on success, -EINVAL if the heads cannot be found or cannot
 * be made writable.
 */
static int __init noptrace_init(void)
{
	int idx;
	struct security_hook_heads *hooks = probe_security_hook_heads();

	if (!hooks)
		return -EINVAL;
	/* Rebase: .head currently points into the dummy struct; keep the
	 * member offset but re-anchor it on the real hook heads. */
	for (idx = 0; idx < ARRAY_SIZE(my_hooks); idx++)
		my_hooks[idx].head = ((void *) hooks)
			+ ((unsigned long) my_hooks[idx].head)
			- ((unsigned long) &probe_dummy_security_hook_heads);
#if defined(NEED_TO_CHECK_HOOKS_ARE_WRITABLE)
	if (!check_ro_pages(hooks)) {
		printk(KERN_INFO "Can't update security_hook_heads due to write protected. Retry with rodata=0 kernel command line option added.\n");
		return -EINVAL;
	}
#endif
#if defined(NEED_TO_CHECK_HOOKS_ARE_WRITABLE) && defined(CONFIG_X86)
	/* Temporarily set the RW bit on every protected entry recorded
	 * by lsm_test_page_ro(). */
	for (idx = 0; idx < ro_pages_len; idx++)
		set_bit(_PAGE_BIT_RW, &(ro_pages[idx]->flags));
#endif
	for (idx = 0; idx < ARRAY_SIZE(my_hooks); idx++)
		add_hook(&my_hooks[idx]);
#if defined(NEED_TO_CHECK_HOOKS_ARE_WRITABLE) && defined(CONFIG_X86)
	/* Restore the original write protection. */
	for (idx = 0; idx < ro_pages_len; idx++)
		clear_bit(_PAGE_BIT_RW, &(ro_pages[idx]->flags));
#endif
	printk(KERN_INFO "ptrace restriction enabled.\n");
	return 0;
}
172 | + | |
173 | +#else | |
174 | + | |
/* Function pointers originally registered by register_security().
 * Filled by noptrace_init(); each wrapper below spins until its slot is
 * non-NULL (paired with the smp_wmb() in swap_security_ops), then
 * chains to the original handler. */
static struct security_operations original_security_ops /* = *security_ops; */;
177 | + | |
178 | +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) | |
179 | + | |
180 | +static int my_ptrace_access_check(struct task_struct *child, unsigned int mode) | |
181 | +{ | |
182 | + if (my_check_task()) | |
183 | + return -EPERM; | |
184 | + while (!original_security_ops.ptrace_access_check) | |
185 | + smp_rmb(); | |
186 | + return original_security_ops.ptrace_access_check(child, mode); | |
187 | +} | |
188 | + | |
189 | +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) | |
190 | + | |
191 | +static int my_ptrace_may_access(struct task_struct *child, unsigned int mode) | |
192 | +{ | |
193 | + if (my_check_task()) | |
194 | + return -EPERM; | |
195 | + while (!original_security_ops.ptrace_may_access) | |
196 | + smp_rmb(); | |
197 | + return original_security_ops.ptrace_may_access(child, mode); | |
198 | +} | |
199 | + | |
200 | +#endif | |
201 | + | |
202 | +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) | |
203 | + | |
204 | +static int my_ptrace_traceme(struct task_struct *parent) | |
205 | +{ | |
206 | + if (my_check_task()) | |
207 | + return -EPERM; | |
208 | + while (!original_security_ops.ptrace_traceme) | |
209 | + smp_rmb(); | |
210 | + return original_security_ops.ptrace_traceme(parent); | |
211 | +} | |
212 | + | |
213 | +#else | |
214 | + | |
215 | +static inline int my_ptrace(struct task_struct *parent, | |
216 | + struct task_struct *child) | |
217 | +{ | |
218 | + if (my_check_task()) | |
219 | + return -EPERM; | |
220 | + while (!original_security_ops.ptrace) | |
221 | + smp_rmb(); | |
222 | + return original_security_ops.ptrace(parent, child); | |
223 | +} | |
224 | + | |
225 | +#endif | |
226 | + | |
/*
 * Save the LSM's original handler and install our wrapper.  The
 * smp_wmb() pairs with the smp_rmb() spin in the wrappers above: any
 * CPU that sees our hook installed also sees the saved original
 * pointer.  Wrapped in do { } while (0) so the multi-statement macro
 * expands safely inside unbraced if/else bodies; existing call sites
 * ("swap_security_ops(x);") are unaffected.
 */
#define swap_security_ops(op) do {			\
		original_security_ops.op = ops->op;	\
		smp_wmb();				\
		ops->op = my_##op;			\
	} while (0)
230 | + | |
/*
 * Legacy (< 4.2) initialisation: locate the security_operations table
 * via the probe helper and splice our ptrace wrappers in front of the
 * registered LSM's handlers, saving the originals for chaining.  The
 * hook names vary by kernel version, hence the three branches.
 *
 * Returns 0 on success, -EINVAL if the ops table cannot be found.
 */
static int __init noptrace_init(void)
{
	struct security_operations *ops = probe_security_ops();

	if (!ops)
		return -EINVAL;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
	swap_security_ops(ptrace_traceme);
	swap_security_ops(ptrace_access_check);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
	swap_security_ops(ptrace_traceme);
	swap_security_ops(ptrace_may_access);
#else
	swap_security_ops(ptrace);
#endif
	printk(KERN_INFO "ptrace restriction enabled.\n");
	return 0;
}
249 | + | |
250 | +#undef swap_security_ops | |
251 | + | |
252 | +#endif | |
253 | + | |
/* Register the version-appropriate noptrace_init() selected above. */
module_init(noptrace_init);
MODULE_LICENSE("GPL");