/*
 * Based on Linux v4.6 arch/arm64/kernel/insn.c
 *
 * Copyright (C) 2013 Huawei Ltd.
 * Author: Jiang Liu <liuj97@gmail.com>
 *
 * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <xen/types.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sizes.h>
#include <xen/bitops.h>
#include <asm/insn.h>
#include <asm/arm64/insn.h>

#define __kprobes
#define pr_err(fmt, ...) printk(XENLOG_ERR fmt, ## __VA_ARGS__)

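/* True for any A64 branch instruction whose target is a PC-relative immediate. */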
bool aarch64_insn_is_branch_imm(u32 insn)
{
        return (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn) ||
                aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn) ||
                aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
                aarch64_insn_is_bcond(insn));
}

static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type,
                                                u32 *maskp, int *shiftp)
{
        u32 mask;
        int shift;

        switch (type) {
        case AARCH64_INSN_IMM_26:
                mask = BIT(26) - 1;
                shift = 0;
                break;
        case AARCH64_INSN_IMM_19:
                mask = BIT(19) - 1;
                shift = 5;
                break;
        case AARCH64_INSN_IMM_16:
                mask = BIT(16) - 1;
                shift = 5;
                break;
        case AARCH64_INSN_IMM_14:
                mask = BIT(14) - 1;
                shift = 5;
                break;
        case AARCH64_INSN_IMM_12:
                mask = BIT(12) - 1;
                shift = 10;
                break;
        case AARCH64_INSN_IMM_9:
                mask = BIT(9) - 1;
                shift = 12;
                break;
        case AARCH64_INSN_IMM_7:
                mask = BIT(7) - 1;
                shift = 15;
                break;
        case AARCH64_INSN_IMM_6:
        case AARCH64_INSN_IMM_S:
                mask = BIT(6) - 1;
                shift = 10;
                break;
        case AARCH64_INSN_IMM_R:
                mask = BIT(6) - 1;
                shift = 16;
                break;
        default:
                return -EINVAL;
        }

        *maskp = mask;
        *shiftp = shift;

        return 0;
}
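
/*
 * For example, AARCH64_INSN_IMM_26 describes the 26-bit immediate of B/BL,
 * which sits at bits [25:0] (hence shift 0), while AARCH64_INSN_IMM_16
 * describes the 16-bit immediate of MOVZ/MOVK/MOVN at bits [20:5]
 * (hence shift 5).
 */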

#define ADR_IMM_HILOSPLIT       2
#define ADR_IMM_SIZE            SZ_2M
#define ADR_IMM_LOMASK          ((1 << ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_HIMASK          ((ADR_IMM_SIZE >> ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_LOSHIFT         29
#define ADR_IMM_HISHIFT         5
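
/*
 * ADR splits its 21-bit immediate across the instruction: the low
 * ADR_IMM_HILOSPLIT bits (immlo) live at bits [30:29] and the remaining
 * 19 high bits (immhi) at bits [23:5], for a +/-1M byte-offset range
 * (ADR_IMM_SIZE == SZ_2M in total).
 */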

u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn)
{
        u32 immlo, immhi, mask;
        int shift;

        switch (type) {
        case AARCH64_INSN_IMM_ADR:
                shift = 0;
                immlo = (insn >> ADR_IMM_LOSHIFT) & ADR_IMM_LOMASK;
                immhi = (insn >> ADR_IMM_HISHIFT) & ADR_IMM_HIMASK;
                insn = (immhi << ADR_IMM_HILOSPLIT) | immlo;
                mask = ADR_IMM_SIZE - 1;
                break;
        default:
                if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
                        pr_err("aarch64_insn_decode_immediate: unknown immediate encoding %d\n",
                               type);
                        return 0;
                }
        }

        return (insn >> shift) & mask;
}
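
/*
 * Example: a B instruction with a +0x1000 byte offset encodes as
 * 0x14000400, and aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26,
 * 0x14000400) returns 0x400, i.e. the byte offset divided by 4.  The
 * result is the raw field value, not yet sign-extended; see
 * aarch64_get_branch_offset() below for that.
 */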

u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
                                            u32 insn, u64 imm)
{
        u32 immlo, immhi, mask;
        int shift;

        if (insn == AARCH64_BREAK_FAULT)
                return AARCH64_BREAK_FAULT;

        switch (type) {
        case AARCH64_INSN_IMM_ADR:
                shift = 0;
                immlo = (imm & ADR_IMM_LOMASK) << ADR_IMM_LOSHIFT;
                imm >>= ADR_IMM_HILOSPLIT;
                immhi = (imm & ADR_IMM_HIMASK) << ADR_IMM_HISHIFT;
                imm = immlo | immhi;
                mask = ((ADR_IMM_LOMASK << ADR_IMM_LOSHIFT) |
                        (ADR_IMM_HIMASK << ADR_IMM_HISHIFT));
                break;
        default:
                if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
                        pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
                               type);
                        return AARCH64_BREAK_FAULT;
                }
        }

        /* Update the immediate field. */
        insn &= ~(mask << shift);
        insn |= (imm & mask) << shift;

        return insn;
}
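
/*
 * Example, going the other way: aarch64_insn_encode_immediate(
 * AARCH64_INSN_IMM_26, 0x14000000, 0x400) turns a bare B opcode into
 * 0x14000400, the +0x1000 branch decoded above.  Note that
 * AARCH64_BREAK_FAULT doubles as the error sentinel and a valid BRK
 * encoding, so the early check lets failures propagate through chained
 * encode calls.
 */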

static inline long branch_imm_common(unsigned long pc, unsigned long addr,
                                     long range)
{
        long offset;

        if ((pc & 0x3) || (addr & 0x3)) {
                pr_err("%s: A64 instructions must be word aligned\n", __func__);
                return range;
        }

        offset = ((long)addr - (long)pc);

        if (offset < -range || offset >= range) {
                pr_err("%s: offset out of range\n", __func__);
                return range;
        }

        return offset;
}
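
/*
 * Note the error convention: on failure branch_imm_common() returns the
 * range itself, which can never be a valid offset (valid offsets are
 * strictly less than range), so callers test "offset >= range".
 */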

u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
                                          enum aarch64_insn_branch_type type)
{
        u32 insn;
        long offset;

        /*
         * B/BL support a [-128M, 128M) offset; the ARM64 virtual address
         * arrangement guarantees that all kernel and module text is
         * within +/-128M.
         */
        offset = branch_imm_common(pc, addr, SZ_128M);
        if (offset >= SZ_128M)
                return AARCH64_BREAK_FAULT;

        switch (type) {
        case AARCH64_INSN_BRANCH_LINK:
                insn = aarch64_insn_get_bl_value();
                break;
        case AARCH64_INSN_BRANCH_NOLINK:
                insn = aarch64_insn_get_b_value();
                break;
        default:
                pr_err("%s: unknown branch encoding %d\n", __func__, type);
                return AARCH64_BREAK_FAULT;
        }

        return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
                                             offset >> 2);
}
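
/*
 * Example: aarch64_insn_gen_branch_imm(0x1000, 0x2000,
 * AARCH64_INSN_BRANCH_NOLINK) encodes a "b +0x1000" as 0x14000400;
 * with AARCH64_INSN_BRANCH_LINK the result is the corresponding BL.
 */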

u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_op op)
{
        return aarch64_insn_get_hint_value() | op;
}

u32 __kprobes aarch64_insn_gen_nop(void)
{
        return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
}
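
/*
 * NOP is HINT #0, so aarch64_insn_gen_nop() returns 0xd503201f, the
 * canonical A64 NOP encoding used when patching instructions out.
 */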

/*
 * Decode the imm field of a branch, and return the byte offset as a
 * signed value (so it can be used when computing a new branch
 * target).
 */
s32 aarch64_get_branch_offset(u32 insn)
{
        s32 imm;

        if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) {
                imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn);
                return (imm << 6) >> 4;
        }

        if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
            aarch64_insn_is_bcond(insn)) {
                imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_19, insn);
                return (imm << 13) >> 11;
        }

        if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn)) {
                imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_14, insn);
                return (imm << 18) >> 16;
        }

        /* Unhandled instruction */
        BUG();
}
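
/*
 * Each shift pair sign-extends and scales in one step: for B/BL, "<< 6"
 * moves the sign bit of the 26-bit field to bit 31, and the arithmetic
 * ">> 4" then yields imm * 4.  E.g. imm26 = 0x3ffffff (all ones) gives
 * (0x3ffffff << 6) >> 4 == -4, a branch to the previous instruction.
 */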

/*
 * Encode the displacement of a branch in the imm field and return the
 * updated instruction.
 */
u32 aarch64_set_branch_offset(u32 insn, s32 offset)
{
        if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn))
                return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
                                                     offset >> 2);

        if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
            aarch64_insn_is_bcond(insn))
                return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
                                                     offset >> 2);

        if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn))
                return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_14, insn,
                                                     offset >> 2);

        /* Unhandled instruction */
        BUG();
}
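
/*
 * get/set are inverses for any handled branch:
 * aarch64_set_branch_offset(insn, aarch64_get_branch_offset(insn)) == insn.
 * This lets a caller retarget an existing branch by decoding its offset,
 * adjusting it, and re-encoding.
 */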
271
272 /*
273 * Local variables:
274 * mode: C
275 * c-file-style: "BSD"
276 * c-basic-offset: 8
277 * indent-tabs-mode: t
278 * End:
279 */
280