#include "gwion_util.h"
#include "gwion_ast.h"
#include "gwion_env.h"
#include "vm.h"
#include "instr.h"
#include "emit.h"
#include "object.h"
#include "vararg.h"
#include "gwion.h"
#include "operator.h"
#include "import.h"
#include "gwi.h"
#include "specialid.h"
#include "traverse.h"
#include "parse.h"
#include "gack.h"

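/* free a Vararg_: release its packed data and type vector (if allocated),
 * then return the struct to the memory pool */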
void free_vararg(MemPool p, struct Vararg_* arg) {
  if(arg->t.ptr) {
    xfree(arg->d);
    vector_release(&arg->t);
  }
  mp_free(p, Vararg, arg);
}

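/* Vararg object destructor: release any object or struct arguments still
 * stored in the packed data, then free the underlying Vararg_ */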
static DTOR(vararg_dtor) {
  struct Vararg_ *arg = *(struct Vararg_**)o->data;
  if(*(m_uint*)(o->data + SZ_INT*2)) {
    m_uint offset = 0;
    for(m_uint i = 0; i < vector_size(&arg->t); ++i) {
      const Type t = (Type)vector_at(&arg->t, i);
      if(isa(t, shred->info->vm->gwion->type[et_object]) > 0)
        release(*(M_Object*)(arg->d + offset), shred);
      else if(GET_FLAG(t, struct))
        struct_release(shred, t, *(m_bit**)(arg->d + offset));
      offset += t->size;
    }
  }
  free_vararg(shred->info->vm->gwion->mp, arg);
}

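/* Vararg.cpy(): duplicate the type vector and packed data, taking a reference
 * on any object arguments, and return the copy as a new Vararg object */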
static MFUN(mfun_vararg_cpy) {
  struct Vararg_ *src = *(struct Vararg_**)o->data;
  struct Vararg_* arg = mp_calloc(shred->info->mp, Vararg);
  vector_copy2(&src->t, &arg->t);
  arg->d = (m_bit*)xmalloc(round2szint(*(m_uint*)(o->data + SZ_INT*2)));
  m_uint offset = 0;
  for(m_uint i = 0; i < vector_size(&arg->t); ++i) {
    const Type t = (Type)vector_at(&arg->t, i);
    *(m_uint*)(arg->d + offset) = *(m_uint*)(src->d + offset);
    if(isa(t, shred->info->vm->gwion->type[et_object]) > 0)
      ++(*(M_Object*)(arg->d + offset))->ref;
    offset += t->size;
  }
  arg->s = vector_size(&arg->t);
  arg->i = src->i;
  arg->o = src->o;
  const M_Object obj = new_object(shred->info->mp, shred, o->type_ref);
  *(struct Vararg_**)obj->data = arg;
  *(m_uint*)(obj->data + SZ_INT*2) = *(m_uint*)(o->data + SZ_INT*2);
  *(M_Object*)RETURN = obj;
}

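/* pop the variadic arguments off the register stack into a freshly allocated
 * Vararg object (m_val: total argument size, m_val2: vector of argument types) */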
INSTR(VarargIni) {
  const M_Object o = new_object(shred->info->mp, shred, shred->info->vm->gwion->type[et_vararg]);
  struct Vararg_* arg = mp_calloc(shred->info->mp, Vararg);
  *(struct Vararg_**)o->data = arg;
  POP_REG(shred, instr->m_val - SZ_INT)
//  if((arg->l = instr->m_val)) {
  if((*(m_uint*)(o->data + SZ_INT * 2) = instr->m_val)) {
    arg->d = (m_bit*)xmalloc(round2szint(instr->m_val));
    const Vector kinds = (Vector)instr->m_val2;
    vector_copy2(kinds, &arg->t);
    m_uint offset = 0;
    for(m_uint i = 0; i < vector_size(&arg->t); ++i) {
      const Type t = (Type)vector_at(&arg->t, i);
      *(m_uint*)(arg->d + offset) = *(m_uint*)(shred->reg - SZ_INT + offset);
      if(isa(t, shred->info->vm->gwion->type[et_object]) > 0) {
        const M_Object obj = *(M_Object*)(arg->d + offset);
        if(obj)
          ++obj->ref;
      }
      offset += t->size;
    }
    arg->s = vector_size(kinds);
  }
  *(M_Object*)REG(-SZ_INT) = o;
}

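/* end of a varloop iteration: advance the data offset and index,
 * and jump back (m_val) while arguments remain */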
static INSTR(VarargEnd) {
  const M_Object o = *(M_Object*)REG(0);
  struct Vararg_* arg = *(struct Vararg_**)o->data;
  arg->o += arg->t.ptr ? ((Type)vector_at(&arg->t, arg->i))->size : 0;
  if(++arg->i < arg->s)
    shred->pc = instr->m_val;
  else
    arg->i = arg->o = 0;
}

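/* type-check the '$' cast on a vararg: simply resolve the target type */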
static OP_CHECK(opck_vararg_cast) {
  const Exp_Cast* cast = (Exp_Cast*)data;
  return known_type(env, cast->td);
}

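/* runtime cast of the current variadic argument: check we are inside a varloop,
 * that the vararg is not empty and that the type matches, then copy the
 * argument onto the register stack (m_val: target type) */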
static INSTR(VarargCast) {
  const M_Object o = *(M_Object*)REG(-SZ_INT);
  if(!*(m_uint*)(o->data + SZ_INT))
    Except(shred, "Using Vararg outside varloop");
  if(!*(m_uint*)(o->data + SZ_INT*2))
    Except(shred, "Using Vararg cast on empty vararg");
  struct Vararg_* arg = *(struct Vararg_**)o->data;
  const Type t = (Type)instr->m_val;
  if(isa((Type)vector_at(&arg->t, arg->i), t) < 0)
    Except(shred, "InvalidVariadicAccess");
  for(m_uint i = 0; i < t->size; i += SZ_INT)
    *(m_uint*)REG(i - SZ_INT) = *(m_uint*)(arg->d + arg->o + i);
}

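/* emit the cast: a VarargCast instruction followed by a register push
 * sized for the target type */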
static OP_EMIT(opem_vararg_cast) {
  const Exp_Cast* cast = (Exp_Cast*)data;
  const Instr instr = emit_add_instr(emit, VarargCast);
  instr->m_val = (m_uint)exp_self(cast)->info->type;
  const Instr push = emit_add_instr(emit, RegPush);
  push->m_val = exp_self(cast)->info->type->size - SZ_INT;
  return instr;
}

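/* free the type vector attached to a VarargIni instruction's m_val2 */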
static FREEARG(freearg_vararg) {
  if(instr->m_val2)
    free_vector(((Gwion)gwion)->mp, (Vector)instr->m_val2);
}

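/* the 'vararg' special identifier is only valid inside a variadic function */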
static ID_CHECK(idck_vararg) {
  if(env->func && GET_FLAG(env->func->def, variadic))
    return nonnul_type(env, exp_self(prim)->info->type);
  ERR_O(exp_self(prim)->pos, _("'vararg' must be used inside variadic function"))
}

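/* emit 'vararg': push the Vararg object stored in the last slot
 * of the function's stack frame */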
static ID_EMIT(idem_vararg) {
  const Instr instr = emit_add_instr(emit, RegPushMem);
  instr->m_val = emit->code->stack_depth - SZ_INT;
  return instr;
}

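/* gack: print the Vararg object's pointer */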
static GACK(gack_vararg) {
  INTERP_PRINTF("%p", *(M_Object*)VALUE);
}

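/* close a varloop: pop the loop value and emit a VarargEnd jumping back to pc */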
ANN void emit_vararg_end(const Emitter emit, const m_uint pc) {
  const Instr pop = emit_add_instr(emit, RegPop);
  pop->m_val = SZ_INT;
  const Instr instr = emit_add_instr(emit, VarargEnd);
  instr->m_val = pc;
}

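/* register the Vararg class, its internal members, the '$' cast operator
 * and the 'vararg' special identifier */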
GWION_IMPORT(vararg) {
  const Type t_vararg = gwi_class_ini(gwi, "Vararg", "Object");
  gwi_class_xtor(gwi, NULL, vararg_dtor);
  gwi_gack(gwi, t_vararg, gack_vararg);
  CHECK_BB(gwi_item_ini(gwi, "@internal", "@data"))
  CHECK_BB(gwi_item_end(gwi, ae_flag_none, NULL))
  CHECK_BB(gwi_item_ini(gwi, "int", "@inLoop"))
  CHECK_BB(gwi_item_end(gwi, ae_flag_none, NULL))
  CHECK_BB(gwi_item_ini(gwi, "int", "@len"))
  CHECK_BB(gwi_item_end(gwi, ae_flag_none, NULL))
  CHECK_BB(gwi_func_ini(gwi, "Vararg", "cpy"))
  CHECK_BB(gwi_func_end(gwi, mfun_vararg_cpy, ae_flag_none))
  GWI_BB(gwi_class_end(gwi))
  SET_FLAG(t_vararg, abstract);
  CHECK_BB(gwi_set_global_type(gwi, t_vararg, et_vararg))
  GWI_BB(gwi_oper_ini(gwi, "nonnull Vararg", (m_str)OP_ANY_TYPE, NULL))
  GWI_BB(gwi_oper_add(gwi, opck_vararg_cast))
  GWI_BB(gwi_oper_emi(gwi, opem_vararg_cast))
  GWI_BB(gwi_oper_end(gwi, "$", NULL))
  gwi_register_freearg(gwi, VarargIni, freearg_vararg);
  struct SpecialId_ spid = { .type=t_vararg, .is_const=1, .ck=idck_vararg, .em=idem_vararg };
  gwi_specialid(gwi, "vararg", &spid);
  return GW_OK;
}