From: Ben Hutchings <ben@decadent.org.uk>
Date: Tue, 26 Jun 2018 16:59:01 +0100
Subject: Export symbols needed by Android drivers
Bug-Debian: https://bugs.debian.org/901492

We want to enable use of the Android ashmem and binder drivers to
support Anbox, but they should not be built in, as that would waste
resources and increase the attack surface on systems that do not need
them.

Export the currently un-exported symbols they depend on.
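
As an illustration only (not part of the change itself), a minimal
GPL-licensed module along the lines of the sketch below becomes loadable
once these exports are in place; without them, loading fails with
"Unknown symbol" errors even though the functions exist in the kernel.
The module and function names are made up for the example, which uses the
get_files_struct()/put_files_struct() pair exported from fs/file.c:

  #include <linux/module.h>
  #include <linux/fdtable.h>
  #include <linux/sched.h>

  static int __init export_demo_init(void)
  {
          struct files_struct *files;

          /* Take and drop a reference on the current task's file table,
           * the same kind of pattern the binder driver relies on.
           */
          files = get_files_struct(current);
          if (files)
                  put_files_struct(files);
          return 0;
  }

  static void __exit export_demo_exit(void)
  {
  }

  module_init(export_demo_init);
  module_exit(export_demo_exit);

  /* EXPORT_SYMBOL_GPL() symbols are visible only to GPL-compatible modules. */
  MODULE_LICENSE("GPL");
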
---
--- a/fs/file.c
+++ b/fs/file.c
@@ -409,6 +409,7 @@ struct files_struct *get_files_struct(st
 
 	return files;
 }
+EXPORT_SYMBOL_GPL(get_files_struct);
 
 void put_files_struct(struct files_struct *files)
 {
@@ -421,6 +422,7 @@ void put_files_struct(struct files_struc
 		kmem_cache_free(files_cachep, files);
 	}
 }
+EXPORT_SYMBOL_GPL(put_files_struct);
 
 void reset_files_struct(struct files_struct *files)
 {
@@ -534,6 +536,7 @@ out:
 	spin_unlock(&files->file_lock);
 	return error;
 }
+EXPORT_SYMBOL_GPL(__alloc_fd);
 
 static int alloc_fd(unsigned start, unsigned flags)
 {
@@ -607,6 +610,7 @@ void __fd_install(struct files_struct *f
 	rcu_assign_pointer(fdt->fd[fd], file);
 	rcu_read_unlock_sched();
 }
+EXPORT_SYMBOL_GPL(__fd_install);
 
 void fd_install(unsigned int fd, struct file *file)
 {
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1054,6 +1054,7 @@ void mmput_async(struct mm_struct *mm)
 		schedule_work(&mm->async_put_work);
 	}
 }
+EXPORT_SYMBOL_GPL(mmput_async);
 #endif
 
 /**
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1368,6 +1368,7 @@ struct sighand_struct *__lock_task_sigha
 
 	return sighand;
 }
+EXPORT_SYMBOL_GPL(__lock_task_sighand);
 
 /*
  * send signal info to all the members of a group
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1644,6 +1644,7 @@ void zap_page_range(struct vm_area_struc
 	mmu_notifier_invalidate_range_end(mm, start, end);
 	tlb_finish_mmu(&tlb, start, end);
 }
+EXPORT_SYMBOL_GPL(zap_page_range);
 
 /**
  * zap_page_range_single - remove user pages in a given range
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -4053,6 +4053,7 @@ int shmem_zero_setup(struct vm_area_stru
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(shmem_zero_setup);
 
 /**
  * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1300,6 +1300,7 @@ int map_kernel_range_noflush(unsigned lo
 {
 	return vmap_page_range_noflush(addr, addr + size, prot, pages);
 }
+EXPORT_SYMBOL_GPL(map_kernel_range_noflush);
 
 /**
  * unmap_kernel_range_noflush - unmap kernel VM area
@@ -1440,6 +1441,7 @@ struct vm_struct *get_vm_area(unsigned l
 				  NUMA_NO_NODE, GFP_KERNEL,
 				  __builtin_return_address(0));
 }
+EXPORT_SYMBOL_GPL(get_vm_area);
 
 struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
 				const void *caller)
--- a/security/security.c
+++ b/security/security.c
@@ -236,24 +236,28 @@ int security_binder_set_context_mgr(cons
 {
 	return call_int_hook(binder_set_context_mgr, 0, mgr);
 }
+EXPORT_SYMBOL_GPL(security_binder_set_context_mgr);
 
 int security_binder_transaction(const struct cred *from,
 				const struct cred *to)
 {
 	return call_int_hook(binder_transaction, 0, from, to);
 }
+EXPORT_SYMBOL_GPL(security_binder_transaction);
 
 int security_binder_transfer_binder(const struct cred *from,
 				    const struct cred *to)
 {
 	return call_int_hook(binder_transfer_binder, 0, from, to);
 }
+EXPORT_SYMBOL_GPL(security_binder_transfer_binder);
 
 int security_binder_transfer_file(const struct cred *from,
 				  const struct cred *to, struct file *file)
 {
 	return call_int_hook(binder_transfer_file, 0, from, to, file);
 }
+EXPORT_SYMBOL_GPL(security_binder_transfer_file);
 
 int security_ptrace_access_check(struct task_struct *child, unsigned int mode)
 {
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3979,6 +3979,7 @@ int can_nice(const struct task_struct *p
 
 	return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
 		capable(CAP_SYS_NICE));
 }
+EXPORT_SYMBOL_GPL(can_nice);
 
 #ifdef __ARCH_WANT_SYS_NICE
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -215,6 +215,7 @@ void __wake_up_pollfree(struct wait_queu
 	/* POLLFREE must have cleared the queue. */
 	WARN_ON_ONCE(waitqueue_active(wq_head));
 }
+EXPORT_SYMBOL_GPL(__wake_up_pollfree);
 
 /*
  * Note: we use "set_current_state()" _after_ the wait-queue add,