1 |
|
|
#include <stdlib.h> |
2 |
|
|
#include <string.h> |
3 |
|
|
#include <stdarg.h> |
4 |
|
|
#include "gwion_util.h" |
5 |
|
|
#include "gwion_ast.h" |
6 |
|
|
#include "gwion_env.h" |
7 |
|
|
#include "vm.h" |
8 |
|
|
#include "object.h" |
9 |
|
|
#include "instr.h" |
10 |
|
|
#include "gwion.h" |
11 |
|
|
#include "operator.h" |
12 |
|
|
#include "import.h" |
13 |
|
|
#include "gack.h" |
14 |
|
|
|
15 |
|
1462 |
/* Return the number of characters that formatting fmt with args would
 * produce (excluding the terminating NUL), or a negative value on a
 * formatting error.  Works on a va_copy so the caller's va_list is
 * left untouched and can still be consumed afterwards. */
ANN2(1) static int fmtlen(const char *fmt, va_list args) {
  va_list copy;
  va_copy(copy, args);
  const int n = vsnprintf(NULL, 0, fmt, copy);
  va_end(copy);
  return n;
}
22 |
|
|
|
23 |
|
1462 |
/* Format fmt/args and append the result to *str, allocating the combined
 * string from the pool.  If *str is NULL a fresh string is created.
 * On success *str is replaced with the new string (the old one is freed)
 * and the number of appended characters is returned; on a formatting
 * error -1 is returned and *str is left untouched. */
ANN2(2) static int gw_vasprintf(MemPool mp, char **str, const char *fmt, va_list args) {
  char *base = *str;
  const size_t base_len = base ? strlen(base) : 0;
  DECL_BB(const int, size, = fmtlen(fmt, args))
  char *ret = mp_malloc2(mp, base_len + size + 1);
  if(base)
    strcpy(ret, base);
  /* bounded write: size was pre-measured by fmtlen, +1 for the NUL */
  const int final_len = vsnprintf(ret + base_len, size + 1, fmt, args);
  if(final_len < 0) {
    mp_free2(mp, base_len + size + 1, ret);
    return -1;
  }
  if(base)
    /* +1: strings produced by this function are allocated with room for
     * the NUL (base_len + size + 1 == strlen(ret) + 1), so the free size
     * must include it to match the allocation.  NOTE(review): assumes
     * *str always originates from this function — confirm callers. */
    mp_free2(mp, strlen(base) + 1, base);
  *str = ret;
  return final_len;
}
40 |
|
|
|
41 |
|
1462 |
/* Variadic front-end for gw_vasprintf: printf-style append to *str
 * (creating it when NULL), pool-allocated.  Returns the number of
 * appended characters, or -1 on a formatting error. */
ANN2(2) int gw_asprintf(MemPool mp, char **str, const char *fmt, ...) {
  va_list ap;
  va_start(ap, fmt);
  const int len = gw_vasprintf(mp, str, fmt, ap);
  va_end(ap);
  return len;
}
48 |
|
|
|
49 |
|
4 |
/* Build a call frame on the shred's memory stack before jumping into a
 * user-defined gack: the caller's code object and program counter are
 * saved so the VM can restore them on return, then pc is reset to the
 * start of the callee. */
ANN static void prepare_call(const VM_Shred shred, const m_uint offset) {
  shred->mem += offset;                                /* move past the caller's locals */
  *(m_uint*)(shred->mem + SZ_INT) = offset + SZ_INT;   /* distance to unwind on return */
  *(VM_Code*)(shred->mem + SZ_INT*2) = shred->code;    /* caller's code object */
  *(m_uint*)(shred->mem + SZ_INT*3) = shred->pc;       /* caller's program counter */
  *(m_uint*)(shred->mem + SZ_INT*4) = SZ_INT;          /* NOTE(review): presumably the callee's push size per the VM call convention — confirm against the interpreter's frame layout */
  shred->mem += SZ_INT*5;                              /* skip the frame header; callee locals start here */
  shred->pc = 0;                                       /* execute callee from its first instruction */
}
58 |
|
|
|
59 |
|
1466 |
ANN void gack(const VM_Shred shred, const m_uint offset) { |
60 |
|
1466 |
const Type t = *(Type*)shred->reg; |
61 |
|
1466 |
const VM_Code code = get_gack(t)->e->gack; |
62 |
✓✓ |
1466 |
if(GET_FLAG(code, builtin)) { |
63 |
|
1462 |
const m_uint sz = *(m_uint*)(shred->reg + SZ_INT); |
64 |
|
1462 |
((f_gack)code->native_func)(t, (shred->reg - sz), shred); |
65 |
|
1462 |
POP_REG(shred, sz); |
66 |
|
|
} else { |
67 |
|
4 |
prepare_call(shred, offset); |
68 |
✓✓ |
4 |
if(GET_FLAG(t, struct)) |
69 |
|
2 |
*(void**)(shred->mem) = (void*)(shred->reg - t->size); |
70 |
|
|
else |
71 |
|
2 |
*(M_Object*)(shred->mem) = *(M_Object*)(shred->reg - SZ_INT); |
72 |
|
4 |
shred->code = code; |
73 |
|
|
} |
74 |
|
1466 |
return; |
75 |
|
|
} |
76 |
|
|
|