GCC Code Coverage Report
Directory: src/ Exec Total Coverage
File: src/lib/shred.c Lines: 255 263 97.0 %
Date: 2020-08-07 19:15:19 Branches: 53 72 73.6 %

Line Branch Exec Source
1
#include "gwion_util.h"
2
#include "gwion_ast.h"
3
#include "gwion_env.h"
4
#include "gwion_thread.h"
5
#include "vm.h"
6
#include "instr.h"
7
#include "object.h"
8
#include "shreduler_private.h"
9
#include "gwion.h"
10
#include "operator.h"
11
#include "import.h"
12
#include "emit.h"
13
#include "specialid.h"
14
#include "gwi.h"
15
16
/* Byte offsets of the hidden members laid out inside Shred/Fork objects.
 * They are assigned at import time by the gwi_item_end() calls in
 * GWION_IMPORT(shred) below. */
static m_int o_fork_thread, o_fork_cond, o_fork_mutex, o_shred_cancel, o_fork_done, o_fork_ev, o_fork_retsize, o_fork_ptr;

/* Lvalue accessors for the Fork object's hidden members. */
#define FORK_THREAD(o) *(THREAD_TYPE*)(o->data + o_fork_thread)
#define FORK_COND(o) *(THREAD_COND_TYPE*)(o->data + o_fork_cond)
#define FORK_MUTEX(o) *(MUTEX_TYPE*)(o->data + o_fork_mutex)
#define FORK_RETSIZE(o) *(m_int*)(o->data + o_fork_retsize)
#define FORK_PTR(o) *(m_uint**)(o->data + o_fork_ptr)
23
24
82
VM_Shred new_shred_base(const VM_Shred shred, const VM_Code code) {
25
82
  const VM_Shred sh = new_vm_shred(shred->info->mp, code);
26
82
  ADD_REF(code)
27
82
  sh->base = shred->base;
28
82
  return sh;
29
}
30
31
1216
/* Wrap `shred` in a new Shred (is_spork) or Fork (!is_spork) object.
 * Forks additionally get an `ev` Event object used by fork_join/fork_run
 * to park and wake shreds waiting on the fork's completion. */
M_Object new_shred(const VM_Shred shred, m_bool is_spork) {
  const M_Object obj = new_object(shred->info->mp, NULL,
    shred->info->vm->gwion->type[is_spork ? et_shred :et_fork]);
  ME(obj) = shred;
  if(!is_spork) {
    /* completion event + its waiter list */
    *(M_Object*)(obj->data + o_fork_ev) = new_object(shred->info->mp, NULL, shred->info->vm->gwion->type[et_event]);
    EV_SHREDS(*(M_Object*)(obj->data + o_fork_ev)) = new_vector(shred->info->mp);
  }
  return obj;
}
41
42
2
/* Shred.exit(): terminate the shred bound to `o`.
 * NOTE(review): `s->mem -= SZ_INT` rewinds the memory stack before exiting —
 * presumably undoing the callee frame so cleanup sees the caller frame;
 * confirm against the VM calling convention. */
static MFUN(gw_shred_exit) {
  const VM_Shred s = ME(o);
  s->mem -= SZ_INT;
  vm_shred_exit(s);
}
47
48
6
static MFUN(vm_shred_id) {
49
6
  const VM_Shred s = ME(o);
50
6
  *(m_int*)RETURN = s ? (m_int)s->tick->xid : -1;
51
6
}
52
53
2
static MFUN(vm_shred_is_running) {
54
2
  const VM_Shred s = ME(o);
55

2
  *(m_uint*)RETURN = (s->tick->next || s->tick->prev) ? 1 : 0;
56
2
}
57
58
2
static MFUN(vm_shred_is_done) {
59
2
  *(m_uint*)RETURN = ME(o) ? 0 : 1;
60
2
}
61
62
23
/* Shred.yield(): reschedule `o`'s shred to run again after GWION_EPSILON.
 * When the target is not the currently running shred it must first be
 * removed from the shreduler so it is not queued twice. */
static MFUN(shred_yield) {
  const VM_Shred s = ME(o);
  const Shreduler sh = s->tick->shreduler;
  if(s != shred)
    shreduler_remove(sh, s, 0);
  shredule(sh, s, GWION_EPSILON);
}
69
70
4
static SFUN(vm_shred_from_id) {
71
4
  const m_int index =  *(m_int*)MEM(0);
72
4
  if(index > 0) {
73
2
    for(m_uint i = 0; i < vector_size(&shred->tick->shreduler->shreds); ++i) {
74
2
      const VM_Shred s = (VM_Shred)vector_at(&shred->tick->shreduler->shreds, i);
75
2
      if(s->tick->xid == (m_uint)index) {
76
2
        *(M_Object*)RETURN = s->info->me;
77
2
        return;
78
      }
79
    }
80
  }
81
2
  *(m_uint*)RETURN = 0;
82
}
83
84
5
static MFUN(shred_args) {
85
5
  const VM_Shred s = ME(o);
86
5
  *(m_uint*)RETURN = s->info->args ? vector_size(s->info->args) : 0;
87
5
}
88
89
3
/* Shred.arg(n): return the n-th launch argument as a new string object,
 * or a null/0 result when there are no args or n is negative.
 * NOTE(review): there is no upper-bound check on `idx` — this relies on
 * vector_at yielding NULL for out-of-range indices; verify. */
static MFUN(shred_arg) {
  const VM_Shred s = ME(o);
  const m_int idx = *(m_int*)MEM(SZ_INT);

  if(s->info->args && idx >= 0) {
    const m_str str = (m_str)vector_at(s->info->args, *(m_uint*)MEM(SZ_INT));
    *(M_Object*)RETURN = str ? new_string(shred->info->mp, shred, str) : NULL;
  } else
    *(m_uint*)RETURN = 0;
}
98
99
#ifndef BUILD_ON_WINDOWS
100
#define PATH_CHR '/'
101
#else
102
#define PATH_CHR '\\'
103
#endif
104
105
/* Generate Shred.name() / Shred.code_name(): wrap code_name(src, 0)
 * (the short form) in a new string object. */
#define describe_name(name, src) \
static MFUN(shred##name##_name) { \
  const VM_Shred s = ME(o); \
  const m_str str = code_name((src), 0); \
  *(m_uint*)RETURN = (m_uint)new_string(shred->info->mp, shred, str); \
}
describe_name(, s->info->name)
describe_name(_code, s->code->name)
113
114
/* Generate Shred.{,code_}path() and Shred.{,code_}dir().
 * `path` returns code_name(src, 1) (the full form); `dir` copies it and
 * truncates at the last PATH_CHR.
 * NOTE(review): the dir scan starts at c[len] (the NUL terminator) and the
 * loop exits at sz==0 without testing c[0]; a path containing no separator
 * is returned unchanged — confirm both are intended. */
#define describe_path_and_dir(name, src) \
static MFUN(shred##name##_path) { \
  const VM_Shred s = ME(o); \
  const m_str str = code_name((src), 1); \
  *(m_uint*)RETURN = (m_uint)new_string(shred->info->mp, shred, str); \
} \
static MFUN(shred##name##_dir) { \
  const VM_Shred  s = ME(o); \
  const m_str str = code_name((src), 1); \
  const size_t len = strlen(str); \
  char c[len + 1]; \
  strcpy(c, str); \
  size_t sz = len;\
  while(sz) {\
    if(c[sz] == PATH_CHR) {\
      c[sz] = 0;\
      break;\
    }\
    --sz;\
  }\
  *(m_uint*)RETURN = (m_uint)new_string(shred->info->mp, shred, c); \
}

describe_path_and_dir(, s->info->name)

describe_path_and_dir(_code, s->code->name)
138
139
483
/* Shred destructor: free the underlying VM shred under the shreduler lock.
 * The mutex is copied to a local first — presumably because free_vm_shred
 * invalidates ME(o) and everything reachable through it. */
static DTOR(shred_dtor) {
  if(ME(o)) {
    MUTEX_TYPE mutex = ME(o)->tick->shreduler->mutex;
    MUTEX_LOCK(mutex);
    free_vm_shred(ME(o));
    MUTEX_UNLOCK(mutex);
  }
}
147
148
1
/* Shred.lock(): take the shreduler mutex (pairs with Shred.unlock()). */
static MFUN(shred_lock) {
  MUTEX_LOCK(ME(o)->tick->shreduler->mutex);
}
151
152
1
/* Shred.unlock(): release the shreduler mutex taken by Shred.lock(). */
static MFUN(shred_unlock) {
  MUTEX_UNLOCK(ME(o)->tick->shreduler->mutex);
}
155
156
11
/* Ask a fork's child VM to stop: clear its driver's running flag and set
 * the object's cancel flag, both under the child shreduler's lock. */
static void stop(const M_Object o) {
  VM *vm = ME(o)->info->vm;
  MUTEX_LOCK(vm->shreduler->mutex);
  vm->shreduler->bbq->is_running = 0;
  *(m_int*)(o->data + o_shred_cancel) = 1;
  MUTEX_UNLOCK(vm->shreduler->mutex);
}
163
164
7
/* Join the fork's worker thread. Idempotent: the handle is zeroed after
 * joining so a second call is a no-op. */
static void join(const M_Object o) {
  if(FORK_THREAD(o)) {
    THREAD_JOIN(FORK_THREAD(o));
    FORK_THREAD(o) = 0;
  }
}
170
171
4
/* Fork destructor: mark the fork done, stop its child VM, unregister it
 * from the parent gwion's child list, queue the child gwion for later
 * cleanup (child2), drop the code reference and free the return buffer —
 * all under the parent shreduler's lock.
 * NOTE(review): mp_free2 goes through shred->info->vm->gwion->mp while the
 * rest of this dtor uses ME(o)/parent — confirm both reach the same pool. */
static DTOR(fork_dtor) {
  *(m_int*)(o->data + o_fork_done) = 1;
  stop(o);
  VM *parent = ME(o)->info->vm->parent;
  MUTEX_LOCK(parent->shreduler->mutex);
  if(parent->gwion->data->child.ptr) {
    const m_int idx = vector_find(&parent->gwion->data->child, (vtype)o);
    if(idx > -1)
      VPTR(&parent->gwion->data->child, idx) = 0;
  }
  /* child2 is created lazily on first fork teardown */
  if(!parent->gwion->data->child2.ptr)
    vector_init(&parent->gwion->data->child2);
  vector_add(&parent->gwion->data->child2, (vtype)ME(o)->info->vm->gwion);
  REM_REF(ME(o)->code, ME(o)->info->vm->gwion);
  mp_free2(shred->info->vm->gwion->mp, FORK_RETSIZE(o), FORK_PTR(o));
  MUTEX_UNLOCK(parent->shreduler->mutex);
}
188
189
3
/* Fork.join(): if the fork already finished, return immediately.
 * Otherwise unschedule the calling shred and park it on the fork's
 * completion event; fork_run broadcasts that event when the child VM
 * stops (unless cancelled). */
static MFUN(fork_join) {
  if(*(m_int*)(o->data + o_fork_done))
    return;
  shreduler_remove(shred->tick->shreduler, shred, 0);
  vector_add(EV_SHREDS(*(M_Object*)(o->data + o_fork_ev)), (vtype)shred);
}
195
196
4
/* Shred.set_cancel(n): set/clear the cancel flag under the shreduler lock;
 * checked later by Shred.test_cancel(). */
static MFUN(shred_cancel) {
  MUTEX_LOCK(ME(o)->tick->shreduler->mutex);
  *(m_int*)(o->data + o_shred_cancel) = *(m_int*)MEM(SZ_INT);
  MUTEX_UNLOCK(ME(o)->tick->shreduler->mutex);
}
201
202
2
/* Shred.test_cancel(): exit the shred if its cancel flag was set. */
static MFUN(shred_test_cancel) {
  if(*(m_int*)(o->data + o_shred_cancel))
    vm_shred_exit(ME(o));
}
206
207
1
/* Fork.test_cancel(): if the cancel flag is set, tear the fork down —
 * stop the child VM, join its worker thread, release the object's
 * reference, then exit the shred. Order matters: the thread must be
 * joined before the object can be released. */
static MFUN(fork_test_cancel) {
  if(*(m_int*)(o->data + o_shred_cancel)) {
    stop(o);
    join(o);
    _release(o, ME(o));
    vm_shred_exit(ME(o));
  }
}
215
216
1
static MFUN(shred_now) {
217
1
  VM *vm = shred->info->vm;
218
3
  while(vm->parent)
219
1
    vm = vm->parent;
220
1
  *(m_float*)RETURN = vm->bbq->pos;
221
1
}
222
223
/* Hand-off data passed (on the launcher's stack) from fork_launch to
 * fork_run; the cond/mutex pair lets the launcher block until the worker
 * thread has started and copied what it needs. */
struct ThreadLauncher {
  MUTEX_TYPE mutex;
  THREAD_COND_TYPE cond;
  VM *vm;
};
228
229
2880289
/* Worker-loop predicate: true while the child VM's driver is running and
 * the fork object has not been cancelled. Both flags are read under the
 * child shreduler's lock. */
static inline int fork_running(VM *vm, const M_Object o) {
  MUTEX_LOCK(vm->shreduler->mutex);
  const int ret = vm->bbq->is_running && !*(m_int*)(o->data + o_shred_cancel);
  MUTEX_UNLOCK(vm->shreduler->mutex);
  return ret;
}
235
236
7
/* Fork worker thread body. Signals the launcher once started (so the
 * launcher's stack-allocated ThreadLauncher stays valid), drives the child
 * VM until it stops or is cancelled, copies the return value into the
 * fork's buffer, then marks the fork done and — unless cancelled — wakes
 * shreds parked on the fork's event. */
static ANN THREAD_FUNC(fork_run) {
struct ThreadLauncher *tl = data;
  VM *vm = tl->vm;
  MUTEX_TYPE mutex = tl->mutex;
  const M_Object me = vm->shreduler->list->self->info->me;
  ++me->ref; /* keep the fork object alive while the thread runs */
  /* handshake: tell fork_launch we are up before it tears down tl */
  MUTEX_COND_LOCK(mutex);
  THREAD_COND_SIGNAL(FORK_COND(me));
  MUTEX_COND_UNLOCK(mutex);
  while(fork_running(vm, me)) {
    vm_run(vm);
    ++vm->bbq->pos;
  }
  /* publish the child's result before signalling completion */
  memcpy(FORK_PTR(me), ME(me)->reg, FORK_RETSIZE(me));
  gwion_end_child(ME(me), vm->gwion);
  MUTEX_LOCK(vm->parent->shreduler->mutex);
  *(m_int*)(me->data + o_fork_done) = 1;
  if(!*(m_int*)(me->data + o_shred_cancel))
    broadcast(*(M_Object*)(me->data + o_fork_ev));
  MUTEX_UNLOCK(vm->parent->shreduler->mutex);
  THREAD_RETURN(0);
}
258
259
7
/* Start a fork's worker thread. Allocates the `sz`-byte return buffer,
 * then uses a temporary cond/mutex pair to block until fork_run has
 * signalled start-up — only then is it safe to let the stack-allocated
 * ThreadLauncher (and the cond/mutex themselves) go away. */
ANN void fork_launch(const M_Object o, const m_uint sz) {
  FORK_RETSIZE(o) = sz;
  FORK_PTR(o) = mp_calloc2(ME(o)->info->vm->gwion->mp, sz);
  MUTEX_SETUP(FORK_MUTEX(o));
  THREAD_COND_SETUP(FORK_COND(o));
  struct ThreadLauncher tl = { .mutex=FORK_MUTEX(o), .cond=FORK_COND(o), .vm=ME(o)->info->vm };
  /* lock before create so the worker's signal cannot be missed */
  MUTEX_COND_LOCK(tl.mutex);
  THREAD_CREATE(FORK_THREAD(o), fork_run, &tl);
  THREAD_COND_WAIT(FORK_COND(o), tl.mutex);
  MUTEX_COND_UNLOCK(tl.mutex);
  THREAD_COND_CLEANUP(FORK_COND(o));
  MUTEX_CLEANUP(FORK_MUTEX(o));
}
272
273
7
/* Tear down every fork in `v`, then release the vector itself.
 * Three separate passes are deliberate: first ask ALL forks to stop (so
 * they wind down concurrently), then join all worker threads, and only
 * then release the objects. NULL slots (already-cleared forks) are
 * skipped in each pass. */
ANN void fork_clean(const VM_Shred shred, const Vector v) {
  /* pass 1: request stop */
  for(m_uint i = 0; i < vector_size(v); ++i) {
    const M_Object o = (M_Object)vector_at(v, i);
    if(!o)
      continue;
    stop(o);
  }
  /* pass 2: join worker threads */
  for(m_uint i = 0; i < vector_size(v); ++i) {
    const M_Object o = (M_Object)vector_at(v, i);
    if(!o)
      continue;
    join(o);
  }
  /* pass 3: drop references */
  for(m_uint i = 0; i < vector_size(v); ++i) {
    const M_Object o = (M_Object)vector_at(v, i);
    if(!o)
      continue;
    _release(o, shred);
   }
  vector_release(v);
  v->ptr = NULL;
}
295
296
730
/* Register the Shred and Fork builtin classes with the plugin importer.
 * The o_* offsets captured by the gwi_item_end() calls are the layout of
 * the hidden members used by the FORK_* macros above. */
GWION_IMPORT(shred) {
  /* --- Shred class --- */
  const Type t_shred = gwi_class_ini(gwi,  "Shred", NULL);
  gwi_class_xtor(gwi, NULL, shred_dtor);
  gwi->gwion->type[et_shred] = t_shred;

  gwi_item_ini(gwi, "@internal", "@me");
  GWI_BB(gwi_item_end(gwi, ae_flag_const, NULL))

  gwi_item_ini(gwi, "int", "cancel");
  GWI_BB((o_shred_cancel = gwi_item_end(gwi, ae_flag_const, NULL)))

  gwi_func_ini(gwi, "void", "exit");
  GWI_BB(gwi_func_end(gwi, gw_shred_exit, ae_flag_none))

  gwi_func_ini(gwi, "int", "running");
  GWI_BB(gwi_func_end(gwi, vm_shred_is_running, ae_flag_none))

  gwi_func_ini(gwi, "int", "done");
  GWI_BB(gwi_func_end(gwi, vm_shred_is_done, ae_flag_none))

  gwi_func_ini(gwi, "int", "id");
  GWI_BB(gwi_func_end(gwi, vm_shred_id, ae_flag_none))

  gwi_func_ini(gwi, "Shred", "fromId");
  gwi_func_arg(gwi, "int", "xid");
  GWI_BB(gwi_func_end(gwi, vm_shred_from_id, ae_flag_static))

  gwi_func_ini(gwi, "void", "yield");
  GWI_BB(gwi_func_end(gwi, shred_yield, ae_flag_none))

  gwi_func_ini(gwi, "int", "args");
  GWI_BB(gwi_func_end(gwi, shred_args, ae_flag_none))

  gwi_func_ini(gwi, "string", "arg");
  gwi_func_arg(gwi, "int", "n");
  GWI_BB(gwi_func_end(gwi, shred_arg, ae_flag_none))

  /* name/path/dir accessors generated by the describe_* macros above */
  gwi_func_ini(gwi, "string", "name");
  GWI_BB(gwi_func_end(gwi, shred_name, ae_flag_none))

  gwi_func_ini(gwi, "string", "path");
  GWI_BB(gwi_func_end(gwi, shred_path, ae_flag_none))

  gwi_func_ini(gwi, "string", "dir");
  GWI_BB(gwi_func_end(gwi, shred_dir, ae_flag_none))

  gwi_func_ini(gwi, "string", "code_name");
  GWI_BB(gwi_func_end(gwi, shred_code_name, ae_flag_none))

  gwi_func_ini(gwi, "string", "code_path");
  GWI_BB(gwi_func_end(gwi, shred_code_path, ae_flag_none))

  gwi_func_ini(gwi, "string", "code_dir");
  GWI_BB(gwi_func_end(gwi, shred_code_dir, ae_flag_none))

  gwi_func_ini(gwi, "void", "set_cancel");
  gwi_func_arg(gwi, "int", "n");
  GWI_BB(gwi_func_end(gwi, shred_cancel, ae_flag_none))
  gwi_func_ini(gwi, "void", "test_cancel");
  GWI_BB(gwi_func_end(gwi, shred_test_cancel, ae_flag_none))
  gwi_func_ini(gwi, "void", "lock");
  GWI_BB(gwi_func_end(gwi, shred_lock, ae_flag_none))
  gwi_func_ini(gwi, "void", "unlock");
  GWI_BB(gwi_func_end(gwi, shred_unlock, ae_flag_none))
  gwi_func_ini(gwi, "float", "get_now");
  GWI_BB(gwi_func_end(gwi, shred_now, ae_flag_none))
  GWI_BB(gwi_class_end(gwi))

  /* `me` keyword: pushes the current shred object */
  struct SpecialId_ spid = { .type=t_shred, .exec=RegPushMe, .is_const=1 };
  gwi_specialid(gwi, "me", &spid);

  SET_FLAG((t_shred), abstract);

  /* --- Fork class (extends Shred) --- */
  const Type t_fork= gwi_class_ini(gwi,  "Fork", "Shred");
  gwi_class_xtor(gwi, NULL, fork_dtor);
  gwi->gwion->type[et_fork] = t_fork;

  gwi_item_ini(gwi, "@internal", "@thread");
  GWI_BB((o_fork_thread = gwi_item_end(gwi, ae_flag_const, NULL)))
  gwi_item_ini(gwi, "@internal", "@cond");
  GWI_BB((o_fork_cond = gwi_item_end(gwi, ae_flag_const, NULL)))
  gwi_item_ini(gwi, "@internal", "@mutex");
  GWI_BB((o_fork_mutex = gwi_item_end(gwi, ae_flag_const, NULL)))
  gwi_item_ini(gwi, "int", "is_done");
  GWI_BB((o_fork_done = gwi_item_end(gwi, ae_flag_const, NULL)))
  gwi_item_ini(gwi, "Event", "ev");
  GWI_BB((o_fork_ev = gwi_item_end(gwi, ae_flag_const, NULL)))
  gwi_item_ini(gwi, "int", "retsize");
  GWI_BB((o_fork_retsize = gwi_item_end(gwi, ae_flag_const, NULL)))
  gwi_item_ini(gwi, "@internal", "@ptr");
  GWI_BB((o_fork_ptr = gwi_item_end(gwi, ae_flag_const, NULL)))
  gwi_func_ini(gwi, "void", "join");
  GWI_BB(gwi_func_end(gwi, fork_join, ae_flag_none))
  gwi_func_ini(gwi, "void", "test_cancel");
  GWI_BB(gwi_func_end(gwi, fork_test_cancel, ae_flag_none))
  GWI_BB(gwi_class_end(gwi))
  SET_FLAG((t_fork), abstract);
  return GW_OK;
}