Line | Branch | Exec | Source |
---|---|---|---|
1 | #include "gwion_util.h" | ||
2 | #include "gwion_ast.h" | ||
3 | #include "gwion_env.h" | ||
4 | #include "vm.h" | ||
5 | #include "gwion.h" | ||
6 | #include "instr.h" | ||
7 | #include "object.h" | ||
8 | #include "array.h" | ||
9 | #include "emit.h" | ||
10 | #include "operator.h" | ||
11 | #include "import.h" | ||
12 | #include "traverse.h" | ||
13 | #include "parse.h" | ||
14 | #include "gwi.h" | ||
15 | #include "emit.h" | ||
16 | #include "looper.h" | ||
17 | |||
/* Destructor for plain (non-object, non-struct) arrays.
   Frees the auxiliary buffer stashed at o->data + SZ_INT (if any),
   then releases the vector storage itself. */
static DTOR(array_dtor) {
  if (*(void **)(o->data + SZ_INT)) xfree(*(void **)(o->data + SZ_INT));
  struct M_Vector_ *a = ARRAY(o);
  m_vector_release(a);
}
23 | |||
/* Destructor for arrays of objects: release each element's reference.
   Elements are stored as pointers, hence the SZ_INT stride. */
static DTOR(array_dtor_obj) {
  struct M_Vector_ *a = ARRAY(o);
  for (m_uint i = 0; i < ARRAY_LEN(a); ++i)
    release(*(M_Object *)(ARRAY_PTR(a) + i * SZ_INT), shred);
}
29 | |||
30 | ✗ | static DTOR(array_dtor_struct) { | |
31 | ✗ | struct M_Vector_ *a = ARRAY(o); | |
32 | ✗ | for (m_uint i = 0; i < ARRAY_LEN(a); ++i) | |
33 | ✗ | struct_release(shred, array_base(o->type_ref), | |
34 | ✗ | &*(m_bit *)(ARRAY_PTR(a) + i * SZ_INT)); | |
35 | } | ||
36 | |||
/* Allocate a new array object of type `t` holding `length` elements.
   Multi-dimensional arrays store element pointers (SZ_INT); leaf arrays
   store the base type's actual size (or plain size when no actual_size). */
ANN M_Object new_array(MemPool p, const Type t, const m_uint length) {
  const M_Object a = new_object(p, t);
  /* typedef'd array types carry their depth on the parent type. */
  const m_uint depth =
      !tflag(t, tflag_typedef) ? t->array_depth : t->info->parent->array_depth;
  const m_uint size = depth > 1 ? SZ_INT : array_base(t)->actual_size ?: array_base(t)->size;
  m_vector_init(ARRAY(a), size, length);
  return a;
}
47 | |||
48 | ✗ | static MFUN(vm_vector_rem) { | |
49 | ✗ | const m_int index = *(m_int *)(shred->mem + SZ_INT); | |
50 | ✗ | const M_Vector v = ARRAY(o); | |
51 | ✗ | if (index < 0 || (m_uint)index >= ARRAY_LEN(v)) return; | |
52 | ✗ | m_vector_rem(v, (vtype)index); | |
53 | } | ||
54 | |||
/* array.remove(index) for object arrays: release the stored reference
   before removing the slot. Out-of-range indices are silently ignored. */
static MFUN(vm_vector_rem_obj) {
  const m_int index = *(m_int *)(shred->mem + SZ_INT);
  const M_Vector v = ARRAY(o);
  if (index < 0 || (m_uint)index >= ARRAY_LEN(v)) return;
  release(*(M_Object *)(ARRAY_PTR(v) + index * ARRAY_SIZE(v)), shred);
  m_vector_rem(v, (vtype)index);
}
62 | |||
/* array.remove(index) for struct arrays: release the struct's members
   before removing the slot. Out-of-range indices are silently ignored. */
static MFUN(vm_vector_rem_struct) {
  const m_int index = *(m_int *)(shred->mem + SZ_INT);
  const M_Vector v = ARRAY(o);
  if (index < 0 || (m_uint)index >= ARRAY_LEN(v)) return;
  const Type t = o->type_ref;
  struct_release(shred, array_base(t), ARRAY_PTR(v) + index * ARRAY_SIZE(v));
  m_vector_rem(v, (vtype)index);
}
71 | |||
72 | ✗ | static MFUN(vm_vector_insert) { | |
73 | ✗ | const m_int index = *(m_int *)(shred->mem + SZ_INT); | |
74 | ✗ | const M_Vector v = ARRAY(o); | |
75 | ✗ | if (index < 0 || (m_uint)index > ARRAY_LEN(v)) return; | |
76 | ✗ | m_vector_insert(v, index, shred->mem + SZ_INT * 2); | |
77 | } | ||
78 | |||
/* array.insert(index, obj) for object arrays: insert, then take a
   reference on the stored object. Out-of-range indices are ignored. */
static MFUN(vm_vector_insert_obj) {
  const m_int index = *(m_int *)(shred->mem + SZ_INT);
  const M_Vector v = ARRAY(o);
  if (index < 0 || (m_uint)index > ARRAY_LEN(v)) return;
  m_vector_insert(v, index, shred->mem + SZ_INT * 2);
  ++(*(M_Object *)(shred->mem + SZ_INT * 2))->ref;
}
86 | |||
/* array.insert(index, value) for struct arrays: insert, then add a
   reference to the struct's members. Out-of-range indices are ignored. */
static MFUN(vm_vector_insert_struct) {
  const m_int index = *(m_int *)(shred->mem + SZ_INT);
  const M_Vector v = ARRAY(o);
  if (index < 0 || (m_uint)index > ARRAY_LEN(v)) return;
  m_vector_insert(v, index, shred->mem + SZ_INT * 2);
  struct_addref(shred->info->vm->gwion, array_base(o->type_ref),
                shred->mem + SZ_INT * 2);
}
95 | |||
/* array.size(): number of elements. */
static MFUN(vm_vector_size) { *(m_uint *)RETURN = ARRAY_LEN(ARRAY(o)); }
97 | |||
/* array.depth(): number of dimensions of the array type. */
static MFUN(vm_vector_depth) { *(m_uint *)RETURN = o->type_ref->array_depth; }
99 | |||
/* array.cap(): current allocated capacity (in elements). */
static MFUN(vm_vector_cap) { *(m_uint *)RETURN = ARRAY_CAP(ARRAY(o)); }
101 | |||
/* array.random(): copy a uniformly chosen element into RETURN.
   NOTE(review): an empty array yields idx 0 and reads past the data —
   confirm callers guarantee non-emptiness. */
static MFUN(vm_vector_random) {
  const M_Vector array = ARRAY(o);
  const m_uint sz = ARRAY_LEN(array);
  /* scale a 32-bit random draw into [0, sz). */
  const m_uint idx =
      (m_int)(sz) * (gw_rand(shred->info->vm->rand) / (UINT32_MAX + 1.0));
  m_vector_get(array, idx, (void *)RETURN);
}
109 | |||
/* Check two array expressions have compatible base types; errors out of
   the enclosing function otherwise. Deliberately NOT wrapped in
   do/while: it declares `l` and `r` for use after the macro. */
#define ARRAY_OPCK(a, b, pos) \
  const Type l = array_base(a->type); \
  const Type r = array_base(b->type); \
  if (isa(r, l) < 0) ERR_N(pos, _("array types do not match."));
114 | |||
/* Type-check `lhs => rhs` array assignment. Returns the rhs type, or
   the error type via ERR_N on mismatch. */
static OP_CHECK(opck_array_at) {
  const Exp_Binary *bin = (Exp_Binary *)data;
  /* rhs must not be a constant. */
  CHECK_NN(opck_const_rhs(env, data));
  if (bin->lhs->type != env->gwion->type[et_error]) {
    /* base types and depths must agree on both sides. */
    ARRAY_OPCK(bin->lhs, bin->rhs, exp_self(bin)->pos)
    if (bin->lhs->type->array_depth != bin->rhs->type->array_depth)
      ERR_N(exp_self(bin)->pos, _("array depths do not match."));
  }
  if (bin->rhs->exp_type == ae_exp_decl) {
    Type_Decl *td = bin->rhs->d.exp_decl.td;
    /* a sized declaration would allocate on its own; forbid it here. */
    if (td->array && td->array->exp)
      ERR_N(exp_self(bin)->pos,
            _("do not provide array for 'xxx => declaration'."));
    SET_FLAG(bin->rhs->d.exp_decl.vd.value, late);
  }
  bin->rhs->ref = bin->lhs;
  exp_setvar(bin->rhs, 1);
  return bin->rhs->type;
}
135 | |||
136 | ✗ | ANN static inline bool shift_match(const Type base, const Type more) { | |
137 | ✗ | return get_depth(base) == get_depth(more); | |
138 | } | ||
139 | |||
/* Shared checker for `<<` / `>>` on arrays: `a` receives, `b` is the
   payload. A depth difference of 0 means concatenation, 1 means
   appending a single (sub-)element; anything else is an error. */
ANN static Type check_array_shift(const Env env, const Exp a, const Exp b,
                                  const m_str str, const loc_t pos) {
  ARRAY_OPCK(a, b, pos)
  const m_int diff = get_depth(a->type) - get_depth(b->type);
  if (diff >= 0 && diff <= 1)
    return a->type;
  ERR_N(pos, "array depths do not match for '%s'.", str);
}
151 | |||
152 | 1 | static OP_CHECK(opck_array_sl) { | |
153 | 1 | const Exp_Binary *bin = (Exp_Binary *)data; | |
154 | 1 | return check_array_shift(env, bin->lhs, bin->rhs, "<<", exp_self(bin)->pos); | |
155 | } | ||
156 | |||
157 | ✗ | static OP_CHECK(opck_array_sr) { | |
158 | ✗ | const Exp_Binary *bin = (Exp_Binary *)data; | |
159 | ✗ | return check_array_shift(env, bin->rhs, bin->lhs, ">>", exp_self(bin)->pos); | |
160 | } | ||
161 | |||
162 | ✗ | ANN static inline m_bool emit_array_shift(const Emitter emit, | |
163 | const f_instr exec) { | ||
164 | ✗ | emit_regmove(emit, -SZ_INT); | |
165 | ✗ | (void)emit_add_instr(emit, exec); | |
166 | ✗ | return GW_OK; | |
167 | } | ||
168 | |||
/* VM: prepend the value just below the array object on the register
   stack to the array. */
static INSTR(ArrayAppendFront) {
  const M_Object o = *(M_Object *)(shred->reg);
  const M_Vector a = ARRAY(o);
  m_vector_add_front(a, shred->reg - ARRAY_SIZE(a));
}
174 | |||
175 | ✗ | static INSTR(ArrayConcatLeft) { | |
176 | ✗ | const M_Object obase = *(M_Object *)(shred->reg - SZ_INT); | |
177 | ✗ | const M_Object omore = *(M_Object *)(shred->reg); | |
178 | ✗ | const M_Vector base = ARRAY(obase); | |
179 | ✗ | const M_Vector more = ARRAY(omore); | |
180 | ✗ | const m_uint len = ARRAY_LEN(base); | |
181 | ✗ | const m_uint sz = ARRAY_SIZE(base); | |
182 | ✗ | if ((ARRAY_LEN(base) += ARRAY_LEN(more)) >= ARRAY_CAP(base)) { | |
183 | ✗ | ARRAY_CAP(base) += ARRAY_CAP(more); | |
184 | m_bit *ptr = | ||
185 | ✗ | (m_bit *)xrealloc(base->ptr, ARRAY_OFFSET + ARRAY_CAP(base) * sz); | |
186 | ✗ | base->ptr = ptr; | |
187 | } | ||
188 | ✗ | m_bit *data = more->ptr + ARRAY_OFFSET; | |
189 | ✗ | memmove(ARRAY_PTR(base) + len * sz, data, sz); | |
190 | } | ||
191 | |||
192 | ✗ | static INSTR(ArrayConcatRight) { | |
193 | ✗ | const M_Object obase = *(M_Object *)(shred->reg); | |
194 | ✗ | const M_Object omore = *(M_Object *)(shred->reg - SZ_INT); | |
195 | ✗ | const M_Vector base = ARRAY(obase); | |
196 | ✗ | const M_Vector more = ARRAY(omore); | |
197 | ✗ | const m_uint len = ARRAY_LEN(base); | |
198 | ✗ | const m_uint sz = ARRAY_SIZE(base); | |
199 | ✗ | if ((ARRAY_LEN(base) += ARRAY_LEN(more)) >= ARRAY_CAP(base)) { | |
200 | ✗ | ARRAY_CAP(base) += ARRAY_CAP(more); | |
201 | m_bit *ptr = | ||
202 | ✗ | (m_bit *)xrealloc(base->ptr, ARRAY_OFFSET + ARRAY_CAP(base) * sz); | |
203 | ✗ | base->ptr = ptr; | |
204 | } | ||
205 | ✗ | memmove(ARRAY_PTR(base) + (ARRAY_LEN(more) + len - 1) * sz, ARRAY_PTR(base), | |
206 | len * sz); | ||
207 | ✗ | memmove(ARRAY_PTR(base), ARRAY_PTR(more), ARRAY_LEN(more) * sz); | |
208 | } | ||
209 | |||
/* Emit `lhs >> rhs`: whole-array concat when depths match, otherwise
   prepend a single element (taking a ref on compound values first). */
static OP_EMIT(opem_array_sr) {
  const Exp_Binary *bin = (Exp_Binary *)data;
  if (shift_match(bin->lhs->type, bin->rhs->type))
    return emit_array_shift(emit, ArrayConcatRight);
  emit_regmove(emit, -SZ_INT);
  if (tflag(bin->lhs->type, tflag_compound))
    emit_compound_addref(emit, bin->lhs->type, -SZ_INT - bin->lhs->type->size, false);
  (void)emit_add_instr(emit, ArrayAppendFront);
  return GW_OK;
}
220 | |||
/* Emit `lhs << rhs`: whole-array concat when depths match, otherwise
   append a single element (taking a ref on compound values first). */
static OP_EMIT(opem_array_sl) {
  const Exp_Binary *bin = (Exp_Binary *)data;
  if (shift_match(bin->rhs->type, bin->lhs->type))
    return emit_array_shift(emit, ArrayConcatLeft);
  if (tflag(bin->rhs->type, tflag_compound))
    emit_compound_addref(emit, bin->rhs->type, -bin->rhs->type->size, false);
  emit_regmove(emit, -bin->rhs->type->size);
  emit_add_instr(emit, ArrayAppend);
  return GW_OK;
}
231 | |||
// check me. use common ancestor maybe
/* `$` cast between array types: valid when depths match and the base
   types are compatible; returns NULL (no cast) otherwise. */
static OP_CHECK(opck_array_cast) {
  const Exp_Cast *cast = (Exp_Cast *)data;
  const Type l = array_base(cast->exp->type);
  const Type r = array_base(exp_self(cast)->type);
  if (get_depth(cast->exp->type) == get_depth(exp_self(cast)->type) &&
      isa(l->info->base_type, r->info->base_type) > 0)
    return l;
  return NULL;
}
242 | |||
243 | 4 | static OP_CHECK(opck_array_slice) { | |
244 | 4 | const Exp e = (Exp)data; | |
245 | 4 | exp_setmeta(exp_self(e), 1); | |
246 | 4 | return e->d.exp_slice.base->type; | |
247 | } | ||
248 | |||
249 | 7 | static inline m_bool bounds(const M_Vector v, const m_int i) { | |
250 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 7 times.
|
7 | CHECK_BB(i); |
251 |
2/2✓ Branch 0 taken 6 times.
✓ Branch 1 taken 1 times.
|
7 | return (m_uint)i < ARRAY_LEN(v) ? GW_OK : GW_ERROR; |
252 | } | ||
253 | |||
/* VM: build a new array from `array[start:end]`, reversed when
   start > end. Raises OutOfBoundsArraySliceException on bad indices. */
static INSTR(ArraySlice) {
  shred->reg -= SZ_INT * 2;
  const M_Object array = *(M_Object *)REG(-SZ_INT);
  const M_Vector in = ARRAY(array);
  const m_int start = *(m_uint *)REG(0);
  m_int end = *(m_uint *)REG(SZ_INT);
  /* negative `end` counts from the array's end.
     NOTE(review): `start` gets no such wrap — confirm intended. */
  if (end < 0) end = ARRAY_LEN(in) + end;
  const m_int op = start < end ? 1 : -1;   /* iteration direction */
  const m_uint sz = op > 0 ? end - start : start - end;
  if (bounds(in, start) < 0 || bounds(in, end) < 0) {
    handle(shred, "OutOfBoundsArraySliceException");
    return;
  }
  const M_Object out = new_array(shred->info->mp, array->type_ref, sz);
  /* element-wise copy through a stack buffer sized to one element. */
  for (m_int i = start, j = 0; i != end; i += op, ++j) {
    m_bit buf[ARRAY_SIZE(in)];
    m_vector_get(in, i, &buf);
    m_vector_set(ARRAY(out), j, buf);
  }
  *(M_Object *)REG(-SZ_INT) = out;
}
275 | |||
276 | 4 | static OP_EMIT(opem_array_slice) { | |
277 | 4 | emit_add_instr(emit, ArraySlice); | |
278 | 4 | return GW_OK; | |
279 | } | ||
280 | |||
281 | 13 | static FREEARG(freearg_array) { | |
282 | 13 | ArrayInfo *info = (ArrayInfo *)instr->m_val; | |
283 | 13 | vector_release(&info->type); | |
284 | 13 | mp_free(((Gwion)gwion)->mp, ArrayInfo, info); | |
285 | 13 | } | |
286 | |||
287 | ANN Type check_array_access(const Env env, const Array_Sub array); | ||
288 | |||
/* Resolve the effective array type: unwrap ref types (which keep the
   referred type in their tuple info) and typedefs without own depth. */
ANN static inline Type get_array_type(const Type type) {
  const Type t = !tflag(type, tflag_ref) ? type : (Type)vector_front(&type->info->tuple->contains);
  return t->array_depth ? t : typedef_base(t);
}
293 | |||
/* Type-check an array subscript: every index expression must be
   (implicitly convertible to) int; then peel dimensions off the type. */
static OP_CHECK(opck_array) {
  const Array_Sub array = (Array_Sub)data;
  const Type t_int = env->gwion->type[et_int];
  Exp e = array->exp;
  do CHECK_BN(check_implicit(env, e, t_int));
  while ((e = e->next));
  const Type t = get_array_type(array->type);
  if (t->array_depth >= array->depth)
    /* this type's own dimensions cover all indices. */
    return array_type(env, array_base(t), t->array_depth - array->depth);
  /* more indices than dimensions: recurse into the base type with the
     remaining index expressions. */
  const Exp curr = take_exp(array->exp, t->array_depth);
  struct Array_Sub_ next = {curr->next, array_base(t),
                            array->depth - t->array_depth};
  return check_array_access(env, &next) ?: env->gwion->type[et_error];
}
308 | |||
/* Emit the per-dimension access chain for a `depth`-index subscript:
   for all but the last dimension, fetch the sub-array pointer and
   guard against null; the final ArrayAccess feeds array_finish. */
ANN static void array_loop(const Emitter emit, const m_uint depth) {
  emit_regmove(emit, -depth * SZ_INT);
  for (m_uint i = 0; i < depth - 1; ++i) {
    const Instr access = emit_add_instr(emit, ArrayAccess);
    access->m_val = i * SZ_INT;
    /* only the first access carries the extra object slot. */
    access->m_val2 = !i ? SZ_INT : 0;
    const Instr get = emit_add_instr(emit, ArrayGet);
    get->m_val = i * SZ_INT;
    get->m_val2 = -SZ_INT;
    const Instr ex = emit_add_instr(emit, GWOP_EXCEPT);
    ex->m_val = -SZ_INT;
  }
  emit_regmove(emit, -SZ_INT);
  const Instr access = emit_add_instr(emit, ArrayAccess);
  access->m_val = depth * SZ_INT;
}
325 | |||
/* Final step of a subscript: push the element's address (is_var) or
   its value, adding a null guard for partial/object results. */
ANN static void array_finish(const Emitter emit, const Array_Sub array, const m_bool is_var) {
  const Instr get = emit_add_instr(emit, is_var ? ArrayAddr : ArrayGet);
  const Type t = array->type;
  if(!is_var) {
    if(array->depth < get_depth(t) || isa(array_base(t), emit->gwion->type[et_object]) > 0)
      emit_add_instr(emit, GWOP_EXCEPT);
  }
  get->m_val = array->depth * SZ_INT;
  /* address results occupy one pointer slot; values their full size. */
  emit_regmove(emit, is_var ? SZ_INT : t->size);
}
336 | |||
337 | 4 | ANN static inline m_bool array_do(const Emitter emit, const Array_Sub array, | |
338 | const m_bool is_var) { | ||
339 |
1/2✗ Branch 1 not taken.
✓ Branch 2 taken 4 times.
|
4 | CHECK_BB(emit_exp(emit, array->exp)); |
340 | 4 | array_loop(emit, array->depth); | |
341 | 4 | array_finish(emit, array, is_var); | |
342 | 4 | return GW_OK; | |
343 | } | ||
344 | |||
/* Ask the `@array_init` operator whether this type wants address
   semantics. vars[0] carries the requested flag in; vars[1] is
   zero-initialized and filled by the op handler as the answer. */
ANN m_bool get_emit_var(const Emitter emit, const Type t, bool is_var) {
  const Env env = emit->env;
  bool vars[2] = { is_var };
  struct Op_Import opi = {.op = insert_symbol("@array_init"),
                          .lhs = t,
                          .data = (uintptr_t)vars};
  CHECK_BB(op_emit(emit, &opi));
  return vars[1];
}
354 | |||
/* Emit the first `depth` index expressions, temporarily cutting the
   expression list after them; returns the remaining expressions, or
   NULL on emit failure. */
ANN static inline Exp emit_n_exp(const Emitter emit,
                                 struct ArrayAccessInfo *const info) {
  const Exp e = take_exp(info->array.exp, info->array.depth);
  const Exp next = e->next;
  e->next = NULL; /* cut the list for partial emission */
  struct Array_Sub_ partial = {info->array.exp, info->array.type,
                               info->array.depth};
  const bool is_var = get_emit_var(emit, array_base(info->array.type), info->is_var);
  const m_bool ret = array_do(emit, &partial, is_var);
  e->next = next; /* restore the list before returning */
  return ret > 0 ? next : NULL;
}
367 | |||
/* Resolve the array type at emit time; a ref array needs an extra
   pointer dereference on the register stack first. */
ANN static Type emit_get_array_type(const Emitter emit, const Type t) {
  if(!tflag(t, tflag_ref)) return t;
  const Instr instr = emit_add_instr(emit, Reg2RegDeref);
  instr->m_val = -SZ_INT;
  instr->m_val2 = -SZ_INT;
  return (Type)vector_front(&t->info->tuple->contains);
}
376 | |||
/* Emit an array access. When this type's own dimensions cover all the
   indices, emit directly; otherwise emit this type's dimensions and
   recurse on the base type with the remaining index expressions. */
static OP_EMIT(opem_array_access) {
  struct ArrayAccessInfo *const info = (struct ArrayAccessInfo *)data;
  const Type t = emit_get_array_type(emit, info->array.type);
  if (t->array_depth >= info->array.depth) {
    struct Array_Sub_ next = {
        .exp = info->array.exp, .type = info->type, .depth = info->array.depth};
    return array_do(emit, &next, info->is_var);
  }
  struct Array_Sub_ partial = {info->array.exp, t,
                               t->array_depth};
  struct Array_Sub_ next = {info->array.exp, array_base(t),
                            info->array.depth - t->array_depth};
  info->array = partial;
  const Exp exp = emit_n_exp(emit, info);
  next.exp = exp;
  info->array = next;
  return exp ? emit_array_access(emit, info) : GW_ERROR;
}
395 | |||
/* Pre-allocated 5-instruction driver bytecodes for the functional
   array builtins (map/compactMap/filter/count/fold/new). Each drives
   the user callback over the array elements. */
static m_bit map_byte[BYTECODE_SZ * 5];
static const struct VM_Code_ map_run_code = {.name = "map_run_code",
                                             .bytecode = map_byte};

static m_bit compactmap_byte[BYTECODE_SZ * 5];
static const struct VM_Code_ compactmap_run_code = {
    .name = "compactmap_run_code", .bytecode = compactmap_byte};

static m_bit filter_byte[BYTECODE_SZ * 5];
static const struct VM_Code_ filter_run_code = {.name = "filter_run_code",
                                                .bytecode = filter_byte};

static m_bit count_byte[BYTECODE_SZ * 5];
static const struct VM_Code_ count_run_code = {.name = "count_run_code",
                                               .bytecode = count_byte};

static m_bit foldl_byte[BYTECODE_SZ * 5];
static const struct VM_Code_ foldl_run_code = {.name = "foldl_run_code",
                                               .bytecode = foldl_byte};

static m_bit foldr_byte[BYTECODE_SZ * 5];
static const struct VM_Code_ foldr_run_code = {.name = "foldr_run_code",
                                               .bytecode = foldr_byte};

static m_bit new_byte[BYTECODE_SZ * 5];
static const struct VM_Code_ new_run_code = {.name = "new_run_code",
                                             .bytecode = new_byte};

/* Caller state saved while a functional builtin drives user code. */
typedef struct FunctionalFrame {
  VM_Code code;       // caller's code, restored when iteration ends
  M_Object o;
  uint16_t pc;        // caller's program counter
  uint16_t offset;    // memory-stack offset popped on completion
  uint16_t index;     // current element index
  uint16_t ret_size;  // byte size of the callback's return value
} FunctionalFrame;
432 | |||
/* Save the caller's pc/code into a FunctionalFrame and redirect the
   shred to the builtin's driver bytecode. `o` is the functor object
   whose VM_Code (stored in its data) will be invoked per element. */
ANN static inline void _init(const VM_Shred shred, const struct VM_Code_ *code, const M_Object o,
                             const m_uint offset, const m_uint start) {
  FunctionalFrame *frame = &*(FunctionalFrame *)MEM(SZ_INT * 2 + start);
  frame->pc = shred->pc;
  frame->code = shred->code;
  frame->offset = offset;
  frame->index = 0;
  *(m_uint *)REG(SZ_INT) = offset;
  shred->code = (VM_Code)code;
  shred->pc = 0;
  shredule(shred->tick->shreduler, shred, 0);
  /* a functor with no code is an error the runtime must surface. */
  if(!(*(VM_Code *)REG(0) = *(VM_Code*)o->data))
    handle(shred, "MissingCodeException");
}
447 | |||
/* Restart the driver bytecode for the next element. */
ANN static inline void _next(const VM_Shred shred, const m_uint offset) {
  shred->pc = 0;
  *(m_uint *)REG(0) = offset;
  POP_REG(shred, SZ_INT);
}
453 | |||
/* Restore the caller's code and pc saved in the frame. */
ANN static inline void _return(const VM_Shred shred,
                               const FunctionalFrame *frame) {
  shred->pc = frame->pc;
  shred->code = frame->code;
}
459 | |||
/* Pop the driver's memory frame and reschedule the shred. */
ANN static inline void _finish(const VM_Shred shred,
                               const FunctionalFrame *frame) {
  POP_MEM(shred, frame->offset);
  shredule(shred->tick->shreduler, shred, 0);
}
465 | |||
/* Memory reserved below the driver's working space: the frame plus the
   VM's own frame bookkeeping. */
#define MAP_CODE_OFFSET (sizeof(FunctionalFrame) + sizeof(struct frame_t))
/* Driver prologue for map/compactMap/filter/count: record the callback's
   return size, move to a safe memory window, and load the current
   element as the callback's argument. */
static INSTR(map_run_ini) {
  const VM_Code code = *(VM_Code*)REG(0);
  const m_uint offset = *(m_uint *)REG(SZ_INT);
  if (offset) PUSH_MEM(shred, offset);
  PUSH_REG(shred, SZ_INT);
  const M_Object self = *(M_Object *)MEM(0);
  const M_Vector array = ARRAY(self);
  FunctionalFrame *frame = &*(FunctionalFrame *)MEM(SZ_INT * 3);
  frame->ret_size = code->ret_type->size;
  shred->pc++;
  shred->mem += MAP_CODE_OFFSET + SZ_INT; // work in a safe memory space
  m_vector_get(array, frame->index, &*(m_bit **)(shred->mem + SZ_INT * 2 + frame->offset + frame->code->stack_depth));
}
480 | |||
/* Driver epilogue for map: store the callback's result into the output
   array, then either loop to the next element or return the result
   array to the caller. */
static INSTR(map_run_end) {
  shred->mem -= MAP_CODE_OFFSET + SZ_INT;
  const M_Object ret_obj = *(M_Object *)MEM(SZ_INT * 2);
  const M_Vector array = ARRAY(*(M_Object *)MEM(0));
  FunctionalFrame *const frame = &*(FunctionalFrame *)MEM(SZ_INT * 3);
  POP_REG(shred, frame->ret_size);
  m_vector_set(ARRAY(ret_obj), frame->index, shred->reg);
  if (++frame->index == ARRAY_LEN(array)) {
    _return(shred, frame);
    *(M_Object *)(REG(-SZ_INT)) = ret_obj;
  } else
    _next(shred, frame->offset);
  _finish(shred, frame);
}
495 | |||
/* Driver epilogue for compactMap: the callback returns an option-like
   object (flag word + payload); keep only the set ones. */
static INSTR(compactmap_run_end) {
  shred->mem -= MAP_CODE_OFFSET + SZ_INT;
  const M_Object self = *(M_Object *)MEM(0);
  const M_Vector self_array = ARRAY(self);
  const M_Object ret_obj = *(M_Object *)MEM(SZ_INT * 2);
  const M_Vector ret_array = ARRAY(ret_obj);
  FunctionalFrame *const frame = &*(FunctionalFrame *)MEM(SZ_INT * 3);
  POP_REG(shred, frame->ret_size);
  const m_uint size = m_vector_size(self_array);
  const M_Object obj = *(M_Object *)REG(0);
  /* first word of the result object flags presence of a value. */
  if (*(m_uint *)obj->data)
    m_vector_add(ret_array, &*(m_bit *)(obj->data + SZ_INT));
  if (++frame->index == size) {
    _return(shred, frame);
    *(M_Object *)(REG(-SZ_INT)) = ret_obj;
  } else
    _next(shred, frame->offset);
  _finish(shred, frame);
}
515 | |||
/* Driver epilogue for filter: keep the current element when the
   predicate returned non-zero. */
static INSTR(filter_run_end) {
  shred->mem -= MAP_CODE_OFFSET + SZ_INT;
  POP_REG(shred, SZ_INT);
  const M_Object self = *(M_Object *)MEM(0);
  const M_Object ret_obj = *(M_Object *)MEM(SZ_INT * 2);
  const M_Vector array = ARRAY(ret_obj);
  FunctionalFrame *const frame = &*(FunctionalFrame *)MEM(SZ_INT * 3);
  if (*(m_uint *)(shred->reg))
    m_vector_add(array,
                 ARRAY_PTR(ARRAY(self)) + frame->index * ARRAY_SIZE(array));
  if (++frame->index == ARRAY_LEN(ARRAY(self))) {
    _return(shred, frame);
    *(M_Object *)(REG(-SZ_INT)) = ret_obj;
  } else
    _next(shred, frame->offset);
  _finish(shred, frame);
}
533 | |||
/* Driver epilogue for count: bump the accumulator (kept in caller
   memory at MEM(SZ_INT*2)) when the predicate returned non-zero. */
static INSTR(count_run_end) {
  shred->mem -= MAP_CODE_OFFSET + SZ_INT;
  const M_Object self = *(M_Object *)MEM(0);
  POP_REG(shred, SZ_INT);
  FunctionalFrame *const frame = &*(FunctionalFrame *)MEM(SZ_INT * 3);
  if (*(m_uint *)(shred->reg)) (*(m_uint *)MEM(SZ_INT * 2))++;
  if (++frame->index == ARRAY_LEN(ARRAY(self))) {
    _return(shred, frame);
    *(m_uint *)(REG(-SZ_INT)) = *(m_uint *)MEM(SZ_INT * 2);
  } else
    _next(shred, frame->offset);
  _finish(shred, frame);
}
547 | |||
/* array.map(f): allocate the same-sized result array and hand control
   to the map driver; an empty source returns the empty result directly. */
static MFUN(vm_vector_map) {
  const m_uint offset = *(m_uint *)REG(SZ_INT * 3);
  const M_Object ret =
      new_array(shred->info->mp, o->type_ref, ARRAY_LEN(ARRAY(o)));
  if (ARRAY_LEN(ARRAY(o))) {
    *(M_Object *)MEM(SZ_INT * 2) = ret;
    _init(shred, &map_run_code, *(M_Object*)MEM(SZ_INT*1), offset, SZ_INT);
  } else
    *(M_Object *)RETURN = ret;
}
558 | |||
/* array.compactMap(f): result array starts empty and grows per kept
   element. NOTE(review): MEM(SZ_INT*2) is written AFTER _init here,
   while vm_vector_map writes it before — confirm the order is benign. */
static MFUN(vm_vector_compactmap) {
  const VM_Code code = *(VM_Code *)REG(SZ_INT * 2);
  const m_uint offset = *(m_uint *)REG(SZ_INT * 3);
  const M_Object ret = new_array(shred->info->mp, code->ret_type, 0);
  if (ARRAY_LEN(ARRAY(o))) {
    _init(shred, &compactmap_run_code, *(M_Object*)MEM(SZ_INT*1), offset, SZ_INT);
    *(M_Object *)MEM(SZ_INT * 2) = ret;
  } else
    *(M_Object *)RETURN = ret;
}
569 | |||
/* array.filter(pred): result array starts empty; the filter driver
   appends elements whose predicate holds. */
static MFUN(vm_vector_filter) {
  const m_uint offset = *(m_uint *)REG(SZ_INT * 3);
  const M_Object ret = new_array(shred->info->mp, o->type_ref, 0);
  if (ARRAY_LEN(ARRAY(o))) {
    _init(shred, &filter_run_code, *(M_Object*)MEM(SZ_INT*1), offset, SZ_INT);
    *(M_Object *)MEM(SZ_INT * 2) = ret;
  } else
    *(M_Object *)RETURN = ret;
}
579 | |||
/* array.count(pred): counter lives in MEM(SZ_INT*2) while the driver
   iterates; an empty array returns 0 immediately. */
static MFUN(vm_vector_count) {
  const m_uint offset = *(m_uint *)REG(SZ_INT * 3);
  if (ARRAY_LEN(ARRAY(o))) {
    _init(shred, &count_run_code, *(M_Object*)MEM(SZ_INT*1), offset, SZ_INT);
    *(m_uint *)MEM(SZ_INT * 2) = 0;
  } else
    *(m_uint *)RETURN = 0;
}
588 | |||
/* Driver prologue for foldl: like map_run_ini but feeds elements
   front-to-back as the callback's first argument. */
static INSTR(foldl_run_ini) {
  const VM_Code code = *(VM_Code*)REG(0);
  const m_uint offset = *(m_uint *)REG(SZ_INT);
  if (offset) PUSH_MEM(shred, offset);
  const M_Object self = *(M_Object *)MEM(0);
  *(m_uint *)(shred->reg + SZ_INT) = 0;
  PUSH_REG(shred, SZ_INT);
  shred->pc++;
  FunctionalFrame *const frame = &*(FunctionalFrame *)MEM(SZ_INT * 3);
  frame->ret_size = code->ret_type->size;
  shred->mem += MAP_CODE_OFFSET + SZ_INT; // work in a safe memory space
  m_vector_get(ARRAY(self), frame->index,
               &*(m_bit **)(shred->mem + SZ_INT * 2 + frame->code->stack_depth));
}
603 | |||
/* Driver prologue for foldr: same as foldl_run_ini but iterates the
   array back-to-front. */
static INSTR(foldr_run_ini) {
  const VM_Code code = *(VM_Code*)REG(0);
  const m_uint offset = *(m_uint *)REG(SZ_INT);
  if (offset) PUSH_MEM(shred, offset);
  const M_Object self = *(M_Object *)MEM(0);
  *(m_uint *)(shred->reg + SZ_INT) = 0;
  PUSH_REG(shred, SZ_INT);
  shred->pc++;
  FunctionalFrame *const frame = &*(FunctionalFrame *)MEM(SZ_INT * 3);
  frame->ret_size = code->ret_type->size;
  shred->mem += MAP_CODE_OFFSET + SZ_INT; // work in a safe memory space
  const M_Vector array = ARRAY(self);
  m_vector_get(array, ARRAY_LEN(array) - frame->index - 1,
               &*(m_bit **)(shred->mem + SZ_INT * 2 + frame->code->stack_depth));
}
619 | |||
/* Driver epilogue shared by foldl/foldr: thread the callback's result
   back in as the accumulator, or hand the final value to the caller.
   NOTE(review): `sz`/`base_sz` arithmetic assumes the callback takes
   exactly (element, accumulator) — confirm against the fold import. */
static INSTR(fold_run_end) {
  shred->mem -= MAP_CODE_OFFSET + SZ_INT;
  FunctionalFrame *const frame = &*(FunctionalFrame *)MEM(SZ_INT * 3);
  const M_Object self = *(M_Object *)MEM(0);
  const VM_Code code = *(VM_Code *)(*(M_Object*)MEM(SZ_INT))->data;
  const m_uint sz = code->stack_depth - ARRAY_SIZE(ARRAY(self));
  const m_uint base_sz = code->stack_depth - sz;
  POP_REG(shred, base_sz); // ret_sz?
  if (++frame->index == ARRAY_LEN(ARRAY(self))) {
    POP_REG(shred, SZ_INT - base_sz);
    shred->pc = frame->pc;
    shred->code = frame->code;
    memcpy(REG(-sz), REG(0), base_sz);
  } else {
    /* stash the accumulator for the next callback invocation. */
    memcpy(shred->mem + MAP_CODE_OFFSET + SZ_INT * 3 + sz, shred->reg, base_sz);
    _next(shred, frame->offset);
  }
  _finish(shred, frame);
}
639 | |||
/* Driver prologue for array.new: pass the current index to the
   generator callback. */
static INSTR(new_run_ini) {
  const m_uint offset = *(m_uint *)REG(SZ_INT);
  if (offset) PUSH_MEM(shred, offset);
  const M_Object arg = *(M_Object *)MEM(SZ_INT);
  const VM_Code code = *(VM_Code*)arg->data;
  *(VM_Code*)REG(0) = code;
  PUSH_REG(shred, SZ_INT);
  FunctionalFrame *frame = &*(FunctionalFrame *)MEM(SZ_INT * 3);
  shred->pc++;
  shred->mem += MAP_CODE_OFFSET + SZ_INT; // work in a safe memory space
  *(m_uint*)MEM(SZ_INT*2+offset) = frame->index;
}
652 | |||
/* Driver epilogue for array.new: store the generated element, then
   loop or return the filled array. */
static INSTR(new_run_end) {
  shred->mem -= MAP_CODE_OFFSET + SZ_INT;
  FunctionalFrame *const frame = &*(FunctionalFrame *)MEM(SZ_INT * 3);
  const M_Object self = *(M_Object *)MEM(0);
  const M_Vector array = ARRAY(self);
  const m_uint base_sz = ARRAY_SIZE(array);
  m_vector_set(array, frame->index, REG(-base_sz));
  POP_REG(shred, base_sz);
  if (++frame->index == ARRAY_LEN(ARRAY(self))) {
    shred->pc = frame->pc;
    shred->code = frame->code;
    *(M_Object*)REG(-SZ_INT) = self;
  } else _next(shred, frame->offset);
  _finish(shred, frame);
}
668 | |||
// Builtin Array.foldl: fold the array from the left with a user callback.
// The accumulator size is read back from the calling instruction's bytecode.
static MFUN(vm_vector_foldl) {
  const m_bit *byte = shred->code->bytecode + (shred->pc - 1) * BYTECODE_SZ;
  const m_uint acc_sz = *(m_uint *)(byte + SZ_INT);
  const m_uint offset = *(m_uint *)REG(SZ_INT * 3 + acc_sz);
  if (ARRAY_LEN(ARRAY(o))) {
    _init(shred, &foldl_run_code, *(M_Object*)MEM(SZ_INT*1), offset, SZ_INT);
    // seed the accumulator in the trampoline's memory space
    memcpy(shred->mem + MAP_CODE_OFFSET + SZ_INT * 3 + acc_sz, MEM(SZ_INT * 2),
           acc_sz);
  } else
    // empty array: the result is the initial accumulator unchanged
    memcpy((m_bit *)RETURN, MEM(SZ_INT * 2), acc_sz);
}
680 | |||
// Builtin Array.foldr: same as foldl but iterates from the right
// (foldr_run_code indexes elements from the end of the array).
static MFUN(vm_vector_foldr) {
  const m_bit *byte = shred->code->bytecode + (shred->pc - 1) * BYTECODE_SZ;
  const m_uint acc_sz = *(m_uint *)(byte + SZ_INT);
  const m_uint offset = *(m_uint *)REG(SZ_INT * 3 + acc_sz);
  if (ARRAY_LEN(ARRAY(o))) {
    _init(shred, &foldr_run_code, *(M_Object*)MEM(SZ_INT*1), offset, SZ_INT);
    // seed the accumulator in the trampoline's memory space
    memcpy(shred->mem + MAP_CODE_OFFSET + SZ_INT * 3 + acc_sz, MEM(SZ_INT * 2),
           acc_sz);
  } else
    // empty array: the result is the initial accumulator unchanged
    memcpy((m_bit *)RETURN, MEM(SZ_INT * 2), acc_sz);
}
692 | |||
// Builtin Array.new: run the user callback once per element to fill the
// array; an empty array skips the trampoline entirely. Returns the array.
static MFUN(vm_vector_new) {
  if (ARRAY_LEN(ARRAY(o))) {
    const m_uint offset = *(m_uint *)REG(SZ_INT * 3);
    _init(shred, &new_run_code, *(M_Object*)MEM(SZ_INT*1), offset, SZ_INT);
  }
  *(M_Object *)RETURN = o;
}
700 | |||
701 | 14872 | static void array_func(const Env env, const Type t, const m_str name, f_xfun fun) { | |
702 | 14872 | const Value v = nspc_lookup_value0(t->nspc, insert_symbol(name)); | |
703 | 14872 | builtin_func(env->gwion, v->d.func_ref, fun); | |
704 | 14872 | } | |
705 | |||
// Template-scan hook for `Array:[T]`: given a base type, materialize (or look
// up) the concrete array class for base->array_depth + 1, wire its native
// methods, and return it. Returns et_error on invalid bases, NULL on
// traversal failure.
static OP_CHECK(opck_array_scan) {
  struct TemplateScan *ts = (struct TemplateScan *)data;
  const Type t_array = env->gwion->type[et_array];
  const Class_Def c = t_array->info->cdef;
  // resolve the element type: either given directly or from the template call
  DECL_ON(const Type, base,
          = ts->t != t_array ? ts->t : known_type(env, *mp_vector_at(ts->td->types, Type_Decl*, 0)));
  if (base->size == 0) {
    gwerr_basic("Can't use type of size 0 as array base", NULL, NULL,
                "/dev/null", (loc_t) {}, 0);
    env_set_error(env, true);
    return env->gwion->type[et_error];
  }
  if (tflag(base, tflag_ref)) {
    gwerr_basic("Can't use ref types as array base", NULL, NULL, "/dev/null",
                (loc_t) {}, 0);
    env_set_error(env, true);
    return env->gwion->type[et_error];
  }
  /*
  if (!strncmp(base->name, "Option:[", 5)) {
    gwerr_basic("Can't use option types as array base", NULL, NULL, "/dev/null",
                (loc_t) {}, 0);
    env_set_error(env, true);
    return env->gwion->type[et_error];
  }
  */
  // reuse an already-instantiated array type if one exists in the owner nspc
  const Symbol sym = array_sym(env, array_base_simple(base), base->array_depth + 1);
  const Type type = nspc_lookup_type1(base->info->value->from->owner, sym);
  if (type) return type;
  // otherwise clone the generic Array class def and instantiate it for `base`
  const Class_Def cdef = cpy_class_def(env->gwion->mp, c);
  cdef->base.ext = type2td(env->gwion, t_array, (loc_t) {});
  cdef->base.xid = sym;
  cdef->base.tmpl->call = new_mp_vector(env->gwion->mp, Type_Decl*, 1);
  mp_vector_set(cdef->base.tmpl->call, Type_Decl*, 0, type2td(env->gwion, base, (loc_t) {}));
  // traverse the new class in the base type's own context/scope
  const Context ctx = env->context;
  env->context = base->info->value->from->ctx;
  const m_uint scope = env_push(env, base->info->value->from->owner_class,
                                base->info->value->from->owner);
  CHECK_BN(scan0_class_def(env, cdef));
  const Type t = cdef->base.type;
  // arrays of abstract (non-union) bases are themselves abstract during traversal
  if (GET_FLAG(base, abstract) && !tflag(base, tflag_union))
    SET_FLAG(t, abstract);
  else
    UNSET_FLAG(t, abstract);
  const m_bool ret = traverse_cdef(env, t);
  UNSET_FLAG(t, abstract);
  env_pop(env, scope);
  env->context = ctx;
  if (ret == GW_ERROR) return NULL;
  set_tflag(t, tflag_emit);
  t->array_depth = base->array_depth + 1;
  t->info->base_type = array_base(base);
  set_tflag(t, tflag_cdef | tflag_tmpl);
  // pick remove/insert variants matching the element kind (object/struct/plain)
  void *rem = tflag(base, tflag_compound)
                  ? !tflag(base, tflag_struct) ? vm_vector_rem_obj
                                               : vm_vector_rem_struct
                  : vm_vector_rem;
  builtin_func(env->gwion, (Func)vector_at(&t->nspc->vtable, 0), rem);
  void *insert = tflag(base, tflag_compound)
                     ? !tflag(base, tflag_struct) ? vm_vector_insert_obj
                                                  : vm_vector_insert_struct
                     : vm_vector_insert;
  array_func(env, t, "insert", insert);
  array_func(env, t, "size", vm_vector_size);
  array_func(env, t, "depth", vm_vector_depth);
  array_func(env, t, "cap", vm_vector_cap);
  array_func(env, t, "random", vm_vector_random);

  array_func(env, t, "map", vm_vector_map);
  array_func(env, t, "compactMap", vm_vector_compactmap);
  array_func(env, t, "filter", vm_vector_filter);
  array_func(env, t, "count", vm_vector_count);
  array_func(env, t, "foldl", vm_vector_foldl);
  array_func(env, t, "foldr", vm_vector_foldr);
  // array_func(env, t, "new", vm_vector_new);

  // compound elements need a destructor to release each element
  if (tflag(base, tflag_compound)) {
    t->nspc->dtor = new_vmcode(env->gwion->mp, NULL, NULL,
                               "array component dtor", SZ_INT, true, false);
    set_tflag(t, tflag_dtor);
    t->nspc->dtor->native_func = (m_uint)(
        !tflag(base, tflag_struct) ? array_dtor_obj : array_dtor_struct);
  }
  return t;
}
791 | |||
792 | ✗ | static OP_CHECK(opck_array_implicit) { | |
793 | ✗ | const struct Implicit *imp = (struct Implicit *)data; | |
794 | ✗ | if (imp->t->array_depth != imp->e->type->array_depth) | |
795 | ✗ | return env->gwion->type[et_error]; | |
796 | ✗ | if (isa(array_base(imp->e->type), array_base(imp->t)) < 0) | |
797 | ✗ | return env->gwion->type[et_error]; | |
798 | ✗ | return imp->t; | |
799 | } | ||
800 | |||
801 | 1 | static OP_EMIT(opem_array_each_init) { | |
802 | 1 | Looper *loop = (Looper *)data; | |
803 | 1 | const Instr instr = emit_add_instr(emit, AutoUnrollInit); | |
804 | 1 | instr->m_val = loop->offset; | |
805 | 1 | return GW_OK; | |
806 | } | ||
807 | |||
808 | |||
// Compute the per-element type seen inside a foreach over `exp`.
// For a plain array this is the base type with one depth stripped; for a
// ref-typed array the element type is taken from the wrapped inner type.
// Returns NULL (via DECL_OO) if the type cannot be resolved.
ANN static inline Type foreach_type(const Env env, const Exp exp) {
  const Type et = exp->type;
  DECL_OO(Type, base, = typedef_base(et));
  DECL_OO(const Type, t, = array_base_simple(base));
  if(!tflag(base, tflag_ref)) {
    const m_uint depth = base->array_depth - 1;
    return depth ? array_type(env, t, depth) : t;
  }
  // ref case: unwrap the contained type and strip one depth from it
  const Type inner = (Type)vector_front(&base->info->tuple->contains);
  const Type refbase = array_base_simple(inner);
  const m_uint depth = inner->array_depth - 1;
  return depth ? array_type(env, refbase, depth) : refbase;
}
822 | |||
823 | // rewrite me | ||
824 | 7 | static OP_CHECK(opck_array_each_val) { | |
825 | 7 | const Exp exp = (const Exp) data; | |
826 |
1/2✗ Branch 1 not taken.
✓ Branch 2 taken 7 times.
|
7 | DECL_ON(const Type, base, = foreach_type(env, exp)); |
827 |
1/2✗ Branch 1 not taken.
✓ Branch 2 taken 7 times.
|
7 | CHECK_BN(ensure_traverse(env, base)); |
828 | 7 | return ref_type(env->gwion, base, exp->pos); | |
829 | } | ||
830 | |||
831 | 7 | static OP_EMIT(opem_array_each) { | |
832 | 7 | Looper *loop = (Looper *)data; | |
833 | 7 | const Instr instr = emit_add_instr(emit, AutoLoop); | |
834 |
2/2✓ Branch 0 taken 5 times.
✓ Branch 1 taken 2 times.
|
7 | if(!loop->n) { |
835 | 5 | instr->m_val2 = loop->offset + SZ_INT; | |
836 | 5 | loop->instr = instr; | |
837 | } else { | ||
838 | 2 | instr->m_val2 = loop->offset + SZ_INT*2; | |
839 | 2 | vector_add(&loop->unroll_v, (m_uint)instr); | |
840 | } | ||
841 | 7 | return GW_OK; | |
842 | } | ||
843 | |||
// Assemble a 5-instruction bytecode trampoline into `byte`:
//   [0] custom ini instruction, [1] SetCode, [2] Overflow check,
//   [3] custom end instruction, [4] EOC.
// Each slot is BYTECODE_SZ wide; the function pointer payload lives at
// offset SZ_INT * 2 within its slot.
ANN static void prepare_run(m_bit *const byte, const f_instr ini,
                            const f_instr end) {
  *(unsigned *)(byte) = eOP_MAX;
  *(f_instr *)(byte+ SZ_INT * 2) = ini;
  *(unsigned *)(byte + BYTECODE_SZ) = eSetCode;
  *(uint16_t *)(byte + BYTECODE_SZ + SZ_INT * 2) = 3;
  *(unsigned *)(byte + BYTECODE_SZ * 2) = eOverflow;
  *(unsigned *)(byte + BYTECODE_SZ * 3) = eOP_MAX;
  *(f_instr *)(byte + BYTECODE_SZ * 3 + SZ_INT * 2) = end;
  *(unsigned *)(byte + BYTECODE_SZ * 4) = eEOC;
}
855 | |||
// Build a map-style trampoline (shared map_run_ini, custom end) and
// finalize it with vm_prepare.
ANN static void prepare_map_run(m_bit *const byte, const f_instr end) {
  prepare_run(byte, map_run_ini, end);
  vm_prepare(NULL, byte);
}
860 | |||
// Build a fold-style trampoline (custom ini, shared fold_run_end) and
// finalize it with vm_prepare.
ANN static void prepare_fold_run(m_bit *const byte, const f_instr ini) {
  prepare_run(byte, ini, fold_run_end);
  vm_prepare(NULL, byte);
}
865 | |||
// Plugin entry point: register the builtin `Array:[T]` class, its function
// pointer types, methods, and all array-related operators.
GWION_IMPORT(array) {
  // assemble the bytecode trampolines used by the functional methods
  prepare_map_run(map_byte, map_run_end);
  prepare_map_run(compactmap_byte, compactmap_run_end);
  prepare_map_run(filter_byte, filter_run_end);
  prepare_map_run(count_byte, count_run_end);
  prepare_fold_run(foldl_byte, foldl_run_ini);
  prepare_fold_run(foldr_byte, foldr_run_ini);
  prepare_run(new_byte, new_run_ini, new_run_end);
  vm_prepare(NULL, new_byte);
  // declare the class itself
  const Type t_array = gwi_class_ini(gwi, "Array:[T]", "Object");
  set_tflag(t_array, tflag_infer);
  gwi->gwion->type[et_array] = t_array;
  gwi_class_xtor(gwi, NULL, array_dtor);
  // extra per-object data: vector storage + init scratch (see array_dtor)
  t_array->nspc->offset += SZ_INT*2;

  // function-pointer types used by map/compactMap/fold/filter/new
  GWI_BB(gwi_fptr_ini(gwi, "A", "map_t:[A]"))
  GWI_BB(gwi_func_arg(gwi, "T", "elem"))
  GWI_BB(gwi_fptr_end(gwi, ae_flag_static))

  GWI_BB(gwi_fptr_ini(gwi, "Option:[A]", "compactmap_t:[A]"))
  GWI_BB(gwi_func_arg(gwi, "T", "elem"))
  GWI_BB(gwi_fptr_end(gwi, ae_flag_static))

  GWI_BB(gwi_fptr_ini(gwi, "A", "fold_t:[A]"))
  GWI_BB(gwi_func_arg(gwi, "T", "elem"))
  GWI_BB(gwi_func_arg(gwi, "A", "acc"))
  GWI_BB(gwi_fptr_end(gwi, ae_flag_static))

  GWI_BB(gwi_fptr_ini(gwi, "bool", "filter_t"))
  GWI_BB(gwi_func_arg(gwi, "T", "elem"))
  GWI_BB(gwi_fptr_end(gwi, ae_flag_static))

  GWI_BB(gwi_fptr_ini(gwi, "T", "new_t"))
  GWI_BB(gwi_func_arg(gwi, "int", "idx"))
  GWI_BB(gwi_fptr_end(gwi, ae_flag_static))

  // put functions using T first
  GWI_BB(gwi_func_ini(gwi, "bool", "remove"))
  GWI_BB(gwi_func_arg(gwi, "int", "index"))
  GWI_BB(gwi_func_end(gwi, vm_vector_rem, ae_flag_none))

  GWI_BB(gwi_func_ini(gwi, "bool", "insert"))
  GWI_BB(gwi_func_arg(gwi, "int", "index"))
  GWI_BB(gwi_func_arg(gwi, "T", "data"))
  GWI_BB(gwi_func_end(gwi, vm_vector_insert, ae_flag_none))

  GWI_BB(gwi_func_ini(gwi, "int", "size"))
  GWI_BB(gwi_func_end(gwi, vm_vector_size, ae_flag_none))
  GWI_BB(gwi_func_ini(gwi, "int", "depth"))
  GWI_BB(gwi_func_end(gwi, vm_vector_depth, ae_flag_none))

  GWI_BB(gwi_func_ini(gwi, "int", "cap"))
  GWI_BB(gwi_func_end(gwi, vm_vector_cap, ae_flag_none))

  GWI_BB(gwi_func_ini(gwi, "T", "random"))
  GWI_BB(gwi_func_end(gwi, vm_vector_random, ae_flag_none))

  // functional methods
  GWI_BB(gwi_func_ini(gwi, "A[]", "map:[A]"))
  GWI_BB(gwi_func_arg(gwi, "map_t:[A]", "data"))
  GWI_BB(gwi_func_end(gwi, vm_vector_map, ae_flag_none))

  GWI_BB(gwi_func_ini(gwi, "A[]", "compactMap:[A]"))
  GWI_BB(gwi_func_arg(gwi, "compactmap_t:[A]", "data"))
  GWI_BB(gwi_func_end(gwi, vm_vector_compactmap, ae_flag_none))

  GWI_BB(gwi_func_ini(gwi, "T[]", "filter"))
  GWI_BB(gwi_func_arg(gwi, "filter_t", "data"))
  GWI_BB(gwi_func_end(gwi, vm_vector_filter, ae_flag_none))

  GWI_BB(gwi_func_ini(gwi, "int", "count"))
  GWI_BB(gwi_func_arg(gwi, "filter_t", "data"))
  GWI_BB(gwi_func_end(gwi, vm_vector_count, ae_flag_none))

  GWI_BB(gwi_func_ini(gwi, "A", "foldl:[A]"))
  GWI_BB(gwi_func_arg(gwi, "fold_t:[A]", "data"))
  GWI_BB(gwi_func_arg(gwi, "A", "initial"))
  GWI_BB(gwi_func_end(gwi, vm_vector_foldl, ae_flag_none))

  GWI_BB(gwi_func_ini(gwi, "A", "foldr:[A]"))
  GWI_BB(gwi_func_arg(gwi, "fold_t:[A]", "data"))
  GWI_BB(gwi_func_arg(gwi, "A", "initial"))
  GWI_BB(gwi_func_end(gwi, vm_vector_foldr, ae_flag_none))

  GWI_BB(gwi_func_ini(gwi, "auto", "new"))
  GWI_BB(gwi_func_arg(gwi, "new_t", "init"))
  GWI_BB(gwi_func_end(gwi, vm_vector_new, ae_flag_none))

  GWI_BB(gwi_class_end(gwi))

  // operators: assignment, implicit conversion, shift append, cast,
  // slicing, indexing, foreach hooks, and template scanning
  GWI_BB(gwi_oper_ini(gwi, "Array", "Array", NULL))
  GWI_BB(gwi_oper_add(gwi, opck_array_at))
  GWI_BB(gwi_oper_end(gwi, ":=>", NULL))
  GWI_BB(gwi_oper_add(gwi, opck_array_implicit))
  //  GWI_BB(gwi_oper_end(gwi, "@implicit", NULL))
  GWI_BB(gwi_oper_end(gwi, "@implicit", NoOp))
  GWI_BB(gwi_oper_ini(gwi, "Array", (m_str)OP_ANY_TYPE, NULL))
  GWI_BB(gwi_oper_add(gwi, opck_array_sl))
  GWI_BB(gwi_oper_emi(gwi, opem_array_sl))
  GWI_BB(gwi_oper_end(gwi, "<<", NULL))
  GWI_BB(gwi_oper_ini(gwi, (m_str)OP_ANY_TYPE, "Array", NULL))
  GWI_BB(gwi_oper_add(gwi, opck_array_sr))
  GWI_BB(gwi_oper_emi(gwi, opem_array_sr))
  GWI_BB(gwi_oper_end(gwi, ">>", NULL))
  GWI_BB(gwi_oper_ini(gwi, "Array", "Array", NULL))
  GWI_BB(gwi_oper_add(gwi, opck_array_cast))
  GWI_BB(gwi_oper_end(gwi, "$", NULL))
  GWI_BB(gwi_oper_ini(gwi, "int", "Array", "int"))
  GWI_BB(gwi_oper_add(gwi, opck_array_slice))
  GWI_BB(gwi_oper_emi(gwi, opem_array_slice))
  GWI_BB(gwi_oper_end(gwi, "[:]", NULL))
  GWI_BB(gwi_oper_ini(gwi, "int", "Array", NULL))
  GWI_BB(gwi_oper_add(gwi, opck_array))
  GWI_BB(gwi_oper_emi(gwi, opem_array_access))
  GWI_BB(gwi_oper_end(gwi, "[]", NULL))
  GWI_BB(gwi_oper_ini(gwi, "Array", NULL, "void"))
  GWI_BB(gwi_oper_emi(gwi, opem_array_each_init))
  GWI_BB(gwi_oper_end(gwi, "@each_init", NULL))
  GWI_BB(gwi_oper_ini(gwi, "Array", NULL, "int"))
  GWI_BB(gwi_oper_emi(gwi, opem_array_each))
  GWI_BB(gwi_oper_end(gwi, "@each", NULL))
  GWI_BB(gwi_oper_ini(gwi, "Array", NULL, NULL))
  GWI_BB(gwi_oper_add(gwi, opck_array_each_val))
  GWI_BB(gwi_oper_end(gwi, "@each_val", NULL))
  GWI_BB(gwi_oper_ini(gwi, "Array", NULL, "int"))
  GWI_BB(gwi_oper_end(gwi, "@each_idx", NULL))
  GWI_BB(gwi_oper_ini(gwi, "Array", NULL, NULL))
  GWI_BB(gwi_oper_add(gwi, opck_array_scan))
  GWI_BB(gwi_oper_end(gwi, "class", NULL))

  GWI_BB(gwi_oper_ini(gwi, (m_str)OP_ANY_TYPE, NULL, "bool"))
  GWI_BB(gwi_oper_end(gwi, "@array_init", NoOp))

  gwi_register_freearg(gwi, ArrayAlloc, freearg_array);
  return GW_OK;
}
1001 | |||
1002 | ✗ | INSTR(ArrayStruct) { | |
1003 | ✗ | const M_Object ref = *(M_Object *)(REG(-SZ_INT * 5)); | |
1004 | ✗ | const m_int idx = (*(m_int *)((shred->reg - SZ_INT * 3)))++; | |
1005 | ✗ | *(m_bit **)(shred->reg) = m_vector_addr(ARRAY(ref), idx); | |
1006 | ✗ | shred->reg += SZ_INT; // regpush | |
1007 | } | ||
1008 | |||
// Store the freshly-created innermost array object into the pointer table
// built by ArrayAlloc: table base at REG(-4*SZ_INT), running index at
// REG(-3*SZ_INT) (post-incremented), object at REG(-SZ_INT).
INSTR(ArrayBottom) {
  *(M_Object *)(*(m_uint **)REG(-SZ_INT * 4))[(*(m_int *)REG(-SZ_INT * 3))++] =
      *(M_Object *)REG(-SZ_INT);
}
1013 | |||
// Finish object-array allocation: free the temporary pointer table and
// clear the array's scratch slot so the dtor won't free it again.
INSTR(ArrayPost) {
  xfree(*(m_uint **)REG(0));
  const M_Object o = *(M_Object *)(REG(-SZ_INT));
  *(m_uint *)(o->data + SZ_INT) = 0;
}
1019 | |||
INSTR(ArrayInit) { // for litteral array
  // m_val holds the array Type, m_val2 the element count; the element size
  // sits on top of the register stack, followed by the elements themselves.
  const Type t = (Type)instr->m_val;
  const m_uint sz = *(m_uint *)REG(0);
  const m_uint off = instr->m_val2 * sz; // total payload bytes
  POP_REG(shred, off - SZ_INT);
  const M_Object obj = new_array(shred->info->mp, t, sz);
  // bulk-copy the literal elements straight into the vector storage
  memcpy(ARRAY(obj)->ptr + ARRAY_OFFSET, REG(-SZ_INT), off);
  *(M_Object *)REG(-SZ_INT) = obj;
}
1029 | |||
1030 | #define TOP -1 | ||
1031 | |||
1032 | ANN static inline M_Object | ||
1033 | 16 | do_alloc_array_object(MemPool p, const ArrayInfo *info, const m_int cap) { | |
1034 | 16 | struct Vector_ v = info->type; | |
1035 | 16 | const Type t = (Type)vector_at(&v, (vtype)(-info->depth - 1)); | |
1036 | 16 | return new_array(p, t, (m_uint)cap); | |
1037 | } | ||
1038 | |||
1039 | ANN static inline M_Object | ||
1040 | 4 | do_alloc_array_init(ArrayInfo *info, const m_uint cap, const M_Object base) { | |
1041 |
2/2✓ Branch 0 taken 7 times.
✓ Branch 1 taken 4 times.
|
11 | for (m_uint i = 0; i < cap; ++i) |
1042 | 7 | info->data[(*info->d.idx)++] = (M_Object)m_vector_addr(ARRAY(base), i); | |
1043 | 4 | return base; | |
1044 | } | ||
1045 | |||
1046 | ANN static M_Object do_alloc_array(const VM_Shred shred, ArrayInfo *info); | ||
// Fill `base` with recursively-allocated sub-arrays, one per element.
// On a failed inner allocation the partially-built `base` is released
// and NULL is returned.
ANN static M_Object do_alloc_array_loop(const VM_Shred shred, ArrayInfo *info,
                                        const m_uint cap, const M_Object base) {
  for (m_uint i = 0; i < cap; ++i) {
    // recurse one level deeper, sharing the type vector, data table and index
    struct ArrayInfo_ aai = {info->depth + 1, info->type, info->base,
                             info->data, {info->d.idx}, info->is_obj};
    const M_Object next = do_alloc_array(shred, &aai);
    if (!next) {
      _release(base, shred);
      return NULL;
    }
    m_vector_set(ARRAY(base), i, &next);
  }
  return base;
}
1061 | |||
// Allocate one dimension of a (possibly multi-dimensional) array.
// The requested capacity for this depth is read from the register stack;
// a negative capacity is an error (returns NULL). Inner dimensions
// (depth < TOP) recurse via do_alloc_array_loop; the innermost dimension
// registers its slots in the pointer table when elements are objects.
ANN static M_Object do_alloc_array(const VM_Shred shred, ArrayInfo *info) {
  const m_int cap = *(m_int *)REG(info->depth * SZ_INT);
  if (cap < 0) {
    gw_err("{-}[{0}{+}Gwion{0}{-}](VM):{0} NegativeArraySize: while allocating arrays...\n");
    return NULL;
  }
  const M_Object base = do_alloc_array_object(shred->info->mp, info, cap);
  return info->depth < TOP ? do_alloc_array_loop(shred, info, (m_uint)cap, base)
         : info->data      ? do_alloc_array_init(info, (m_uint)cap, base)
                           : base;
}
1073 | |||
1074 | 4 | ANN static M_Object *init_array(const VM_Shred shred, const ArrayInfo *info, | |
1075 | m_uint *num_obj) { | ||
1076 | 4 | m_int curr = -info->depth; | |
1077 |
2/2✓ Branch 0 taken 6 times.
✓ Branch 1 taken 4 times.
|
10 | while (curr <= TOP) { |
1078 | 6 | *num_obj *= *(m_uint *)REG(SZ_INT * curr); | |
1079 | 6 | ++curr; | |
1080 | } | ||
1081 |
1/2✓ Branch 0 taken 4 times.
✗ Branch 1 not taken.
|
4 | return *num_obj > 0 ? (M_Object *)xcalloc(*num_obj, info->base->size) : NULL; |
1082 | } | ||
1083 | |||
// Allocate a (possibly multi-dimensional) array whose dimension sizes are
// on the register stack. For object-element arrays, a pointer table plus
// bookkeeping values are left on the stack for ArrayBottom/ArrayPost; on
// failure an ArrayAllocException is raised.
INSTR(ArrayAlloc) {
  const ArrayInfo * info = (ArrayInfo *)instr->m_val;
  m_uint num_obj = 1;
  m_int idx = 0;
  // local recursion state: depths count up from -info->depth to TOP
  struct ArrayInfo_ aai = {-info->depth, info->type, .base = info->base,
                           NULL, {&idx}, info->is_obj};
  if (info->is_obj) aai.data = init_array(shred, info, &num_obj);
  const M_Object ref = do_alloc_array(shred, &aai);
  if (!ref) {
    gw_err("{-}[{0}{+}Gwion{0}{-}](VM):{0} (note: in shred[id=%" UINT_F ":%s])\n",
           shred->tick->xid, shred->code->name);
    if (info->is_obj) xfree(aai.data);
    handle(shred, "ArrayAllocException");
    return; // TODO make exception vararg
  }
  // stash the pointer table on the object so array_dtor can free it if
  // the post-init instructions never run
  *(void **)(ref->data + SZ_INT) = aai.data;
  if (!info->is_obj) {
    // plain elements: pop the dimension sizes and leave the array
    POP_REG(shred, SZ_INT * (info->depth - 1));
    *(M_Object *)REG(-SZ_INT) = ref;
  } else {
    // object elements: leave [ref, table, index, count] for ArrayBottom
    POP_REG(shred, SZ_INT * (info->depth - 4));
    *(M_Object *)REG(-SZ_INT * 4) = ref;
    *(M_Object **)REG(-SZ_INT * 3) = aai.data;
    *(m_uint *)REG(-SZ_INT * 2) = 0;
    *(m_uint *)REG(-SZ_INT) = num_obj;
  }
}
1111 | |||
1112 | ✗ | ANN static bool last_is_zero(Exp e) { | |
1113 | ✗ | while(e->next) e = e->next; | |
1114 | ✗ | return exp_is_zero(e); | |
1115 | } | ||
1116 | |||
1117 | ✗ | ANN2(1,2) m_bool check_array_instance(const Env env, Type_Decl *td, const Exp args) { | |
1118 | ✗ | if (!last_is_zero(td->array->exp)) { | |
1119 | ✗ | if (!args) | |
1120 | ✗ | ERR_B(td->pos, "declaration of abstract type arrays needs lambda"); | |
1121 | } else { | ||
1122 | ✗ | if(args) | |
1123 | ✗ | gwerr_warn("array is empty", "no need to provide a lambda", | |
1124 | ✗ | NULL, env->name, td->array->exp->pos); | |
1125 | } | ||
1126 | ✗ | return GW_OK; | |
1127 | } | ||
1128 |