GCC Code Coverage Report

Directory: src/
File:      src/lib/shred.c
Date:      2020-09-14 09:03:05

            Exec  Total  Coverage
Lines:       263    276   95.3 %
Branches:     47     68   69.1 %

Source

#include "gwion_util.h"
#include "gwion_ast.h"
#include "gwion_env.h"
#include "gwion_thread.h"
#include "vm.h"
#include "instr.h"
#include "object.h"
#include "shreduler_private.h"
#include "gwion.h"
#include "operator.h"
#include "import.h"
#include "emit.h"
#include "specialid.h"
#include "gwi.h"

static m_int o_fork_thread, o_fork_cond, o_fork_mutex, o_shred_cancel, o_fork_done, o_fork_ev, o_fork_retsize;

#define FORK_THREAD(o) *(THREAD_TYPE*)(o->data + o_fork_thread)
#define FORK_COND(o) *(THREAD_COND_TYPE*)(o->data + o_fork_cond)
#define FORK_MUTEX(o) *(MUTEX_TYPE*)(o->data + o_fork_mutex)
#define FORK_RETSIZE(o) *(m_int*)(o->data + o_fork_retsize)

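Note: the FORK_* macros above all follow the same pattern. An object's members live in a raw data buffer, the byte offset of each member is stored in one of the o_fork_* / o_shred_cancel globals when the class is built (see GWION_IMPORT(shred) below, where the gwi_item_end return values fill them in), and the macro casts data + offset back to the member's real type. Below is a minimal, self-contained sketch of that access pattern; the names (DemoObject, demo_offset_flag, DEMO_FLAG) are hypothetical and not part of Gwion.

/* Sketch only: generic offset-based member access, not Gwion's actual types. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct DemoObject {
  _Alignas(8) uint8_t data[32];   /* raw member storage, like an object's data block */
} DemoObject;

static long demo_offset_flag;     /* offset of the `flag` member, set by the class builder */
#define DEMO_FLAG(o) (*(int64_t*)((o)->data + demo_offset_flag))

int main(void) {
  DemoObject o;
  memset(o.data, 0, sizeof o.data);
  demo_offset_flag = 8;           /* pretend the builder placed `flag` at byte 8 */
  DEMO_FLAG(&o) = 42;             /* write through the typed view */
  printf("%lld\n", (long long)DEMO_FLAG(&o));  /* read it back: prints 42 */
  return 0;
}
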
VM_Shred new_shred_base(const VM_Shred shred, const VM_Code code) {
  const VM_Shred sh = new_vm_shred(shred->info->mp, code);
  ADD_REF(code)
  sh->base = shred->base;
  return sh;
}

M_Object new_shred(const VM_Shred shred) {
  const M_Object obj = new_object(shred->info->mp, NULL,
    shred->info->vm->gwion->type[et_shred]);
  ME(obj) = shred;
  return obj;
}

ANN static inline M_Object fork_object(const VM_Shred shred, const Type t) {
  const Gwion gwion = shred->info->vm->gwion;
  const M_Object o = new_object(gwion->mp, shred, t);
  *(M_Object*)(o->data + o_fork_ev) = new_object(gwion->mp, NULL, gwion->type[et_event]);
  EV_SHREDS(*(M_Object*)(o->data + o_fork_ev)) = new_vector(gwion->mp);
  return o;
}

ANN M_Object new_fork(const VM_Shred shred, const VM_Code code, const Type t) {
  VM* parent = shred->info->vm;
  const VM_Shred sh = new_shred_base(shred, code);
  VM* vm = (sh->info->vm = gwion_cpy(parent));
  vm->parent = parent;
  const M_Object o = sh->info->me = fork_object(shred, t);
  ME(o) = sh;
  ++o->ref;
  shreduler_add(vm->shreduler, sh);
  return o;
}

static MFUN(gw_shred_exit) {
  const VM_Shred s = ME(o);
  s->mem -= SZ_INT;
  vm_shred_exit(s);
}

static MFUN(vm_shred_id) {
  const VM_Shred s = ME(o);
  *(m_int*)RETURN = s ? (m_int)s->tick->xid : -1;
}

static MFUN(vm_shred_is_running) {
  const VM_Shred s = ME(o);
  *(m_uint*)RETURN = (s->tick->next || s->tick->prev) ? 1 : 0;
}

static MFUN(vm_shred_is_done) {
  *(m_uint*)RETURN = ME(o) ? 0 : 1;
}

static MFUN(shred_yield) {
  const VM_Shred s = ME(o);
  const Shreduler sh = s->tick->shreduler;
  if(s != shred)
    shreduler_remove(sh, s, 0);
  shredule(sh, s, GWION_EPSILON);
}

static SFUN(vm_shred_from_id) {
  const m_int index = *(m_int*)MEM(0);
  if(index > 0) {
    for(m_uint i = 0; i < vector_size(&shred->tick->shreduler->shreds); ++i) {
      const VM_Shred s = (VM_Shred)vector_at(&shred->tick->shreduler->shreds, i);
      if(s->tick->xid == (m_uint)index) {
        *(M_Object*)RETURN = s->info->me;
        return;
      }
    }
  }
  *(m_uint*)RETURN = 0;
}

static MFUN(shred_args) {
  const VM_Shred s = ME(o);
  *(m_uint*)RETURN = s->info->args ? vector_size(s->info->args) : 0;
}

static MFUN(shred_arg) {
  const VM_Shred s = ME(o);
  const m_int idx = *(m_int*)MEM(SZ_INT);
  if(s->info->args && idx >= 0) {
    const m_str str = (m_str)vector_at(s->info->args, *(m_uint*)MEM(SZ_INT));
    *(M_Object*)RETURN = str ? new_string(shred->info->mp, shred, str) : NULL;
  } else
    *(m_uint*)RETURN = 0;
}

#ifndef BUILD_ON_WINDOWS
#define PATH_CHR '/'
#else
#define PATH_CHR '\\'
#endif

#define describe_name(name, src) \
static MFUN(shred##name##_name) { \
  const VM_Shred s = ME(o); \
  const m_str str = code_name((src), 0); \
  *(m_uint*)RETURN = (m_uint)new_string(shred->info->mp, shred, str); \
}
describe_name(, s->info->name)
describe_name(_code, s->code->name)

#define describe_path_and_dir(name, src) \
static MFUN(shred##name##_path) { \
  const VM_Shred s = ME(o); \
  const m_str str = code_name((src), 1); \
  *(m_uint*)RETURN = (m_uint)new_string(shred->info->mp, shred, str); \
} \
static MFUN(shred##name##_dir) { \
  const VM_Shred s = ME(o); \
  const m_str str = code_name((src), 1); \
  const size_t len = strlen(str); \
  char c[len + 1]; \
  strcpy(c, str); \
  size_t sz = len; \
  while(sz) { \
    if(c[sz] == PATH_CHR) { \
      c[sz] = 0; \
      break; \
    } \
    --sz; \
  } \
  *(m_uint*)RETURN = (m_uint)new_string(shred->info->mp, shred, c); \
}
describe_path_and_dir(, s->info->name)
describe_path_and_dir(_code, s->code->name)

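The shred##name##_dir half of the macro above copies the path into a local buffer and walks backwards until it hits PATH_CHR, truncating the string there so that only the directory part remains. Here is a standalone sketch of the same loop outside the MFUN/macro machinery; parent_dir is a hypothetical helper name, not part of the Gwion API, and the caller must supply a buffer of at least strlen(path) + 1 bytes.

/* Sketch only: truncate a path at its last separator, as the macro expansion does. */
#include <stdio.h>
#include <string.h>

#define PATH_CHR '/'                 /* the real macro picks '\\' on Windows */

static void parent_dir(const char *path, char *out) {
  size_t sz = strlen(path);
  strcpy(out, path);
  while(sz) {                        /* scan from the end of the string */
    if(out[sz] == PATH_CHR) {
      out[sz] = 0;                   /* cut here: keep only the directory part */
      break;
    }
    --sz;
  }
}

int main(void) {
  char dir[64];
  parent_dir("examples/osc.gw", dir);
  printf("%s\n", dir);               /* prints "examples" */
  return 0;
}

As in the original, a path with no separator comes back unchanged, because the loop stops at index 0 without testing it.
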
static DTOR(shred_dtor) {
  if(ME(o)) {
    MUTEX_TYPE mutex = ME(o)->tick->shreduler->mutex;
    MUTEX_LOCK(mutex);
    free_vm_shred(ME(o));
    MUTEX_UNLOCK(mutex);
  }
}

static MFUN(shred_lock) {
  MUTEX_LOCK(ME(o)->tick->shreduler->mutex);
}

static MFUN(shred_unlock) {
  MUTEX_UNLOCK(ME(o)->tick->shreduler->mutex);
}

static void stop(const M_Object o) {
  VM *vm = ME(o)->info->vm;
  MUTEX_LOCK(vm->shreduler->mutex);
  vm->shreduler->bbq->is_running = 0;
  *(m_int*)(o->data + o_shred_cancel) = 1;
  MUTEX_UNLOCK(vm->shreduler->mutex);
}

static void join(const M_Object o) {
  if(FORK_THREAD(o)) {
    THREAD_JOIN(FORK_THREAD(o));
    FORK_THREAD(o) = 0;
  }
}

static DTOR(fork_dtor) {
  *(m_int*)(o->data + o_fork_done) = 1;
  stop(o);
  VM *parent = ME(o)->info->vm->parent;
  MUTEX_LOCK(parent->shreduler->mutex);
  if(parent->gwion->data->child.ptr) {
    const m_int idx = vector_find(&parent->gwion->data->child, (vtype)o);
    if(idx > -1)
      VPTR(&parent->gwion->data->child, idx) = 0;
  }
  if(!parent->gwion->data->child2.ptr)
    vector_init(&parent->gwion->data->child2);
  vector_add(&parent->gwion->data->child2, (vtype)ME(o)->info->vm->gwion);
  REM_REF(ME(o)->code, ME(o)->info->vm->gwion);
  MUTEX_UNLOCK(parent->shreduler->mutex);
}

static MFUN(fork_join) {
  if(*(m_int*)(o->data + o_fork_done))
    return;
  shreduler_remove(shred->tick->shreduler, shred, 0);
  vector_add(EV_SHREDS(*(M_Object*)(o->data + o_fork_ev)), (vtype)shred);
}

static MFUN(shred_cancel) {
  MUTEX_LOCK(ME(o)->tick->shreduler->mutex);
  *(m_int*)(o->data + o_shred_cancel) = *(m_int*)MEM(SZ_INT);
  MUTEX_UNLOCK(ME(o)->tick->shreduler->mutex);
}

static MFUN(shred_test_cancel) {
  if(*(m_int*)(o->data + o_shred_cancel))
    vm_shred_exit(ME(o));
}

static MFUN(fork_test_cancel) {
  if(*(m_int*)(o->data + o_shred_cancel)) {
    stop(o);
    join(o);
    _release(o, ME(o));
    vm_shred_exit(ME(o));
  }
}

static MFUN(shred_now) {
  VM *vm = shred->info->vm;
  while(vm->parent)
    vm = vm->parent;
  *(m_float*)RETURN = vm->bbq->pos;
}

struct ThreadLauncher {
  MUTEX_TYPE mutex;
  THREAD_COND_TYPE cond;
  VM *vm;
};

static inline int fork_running(VM *vm, const M_Object o) {
  MUTEX_LOCK(vm->shreduler->mutex);
  const int ret = vm->bbq->is_running && !*(m_int*)(o->data + o_shred_cancel);
  MUTEX_UNLOCK(vm->shreduler->mutex);
  return ret;
}

static ANN THREAD_FUNC(fork_run) {
  struct ThreadLauncher *tl = data;
  VM *vm = tl->vm;
  MUTEX_TYPE mutex = tl->mutex;
  const M_Object me = vm->shreduler->list->self->info->me;
  ++me->ref;
  MUTEX_COND_LOCK(mutex);
  THREAD_COND_SIGNAL(FORK_COND(me));
  MUTEX_COND_UNLOCK(mutex);
  while(fork_running(vm, me)) {
    vm_run(vm);
    ++vm->bbq->pos;
  }
  memcpy(me->data + vm->gwion->type[et_fork]->nspc->info->offset, *(m_bit**)ME(me)->reg, FORK_RETSIZE(me));
  gwion_end_child(ME(me), vm->gwion);
  MUTEX_LOCK(vm->parent->shreduler->mutex);
  *(m_int*)(me->data + o_fork_done) = 1;
  if(!*(m_int*)(me->data + o_shred_cancel))
    broadcast(*(M_Object*)(me->data + o_fork_ev));
  MUTEX_UNLOCK(vm->parent->shreduler->mutex);
  THREAD_RETURN(0);
}

ANN void fork_launch(const M_Object o, const m_uint sz) {
  FORK_RETSIZE(o) = sz;
  MUTEX_SETUP(FORK_MUTEX(o));
  THREAD_COND_SETUP(FORK_COND(o));
  struct ThreadLauncher tl = { .mutex=FORK_MUTEX(o), .cond=FORK_COND(o), .vm=ME(o)->info->vm };
  MUTEX_COND_LOCK(tl.mutex);
  THREAD_CREATE(FORK_THREAD(o), fork_run, &tl);
  THREAD_COND_WAIT(FORK_COND(o), tl.mutex);
  MUTEX_COND_UNLOCK(tl.mutex);
  THREAD_COND_CLEANUP(FORK_COND(o));
  MUTEX_CLEANUP(FORK_MUTEX(o));
}

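fork_run and fork_launch above form a small hand-off: the launcher sets up the mutex and condition variable, takes the lock, creates the thread, and waits; the new thread grabs a reference to its object, signals the condition, and only then enters its vm_run loop, so fork_launch can tear down the temporary ThreadLauncher once it wakes up. Here is a generic sketch of that handshake in plain pthreads, standing in for the MUTEX_COND_* / THREAD_COND_* macros; the launcher/worker names and the ready flag are illustrative, not Gwion's API.

/* Sketch only: launcher waits until the worker has signalled that it is up. */
#include <pthread.h>
#include <stdio.h>

struct launcher {
  pthread_mutex_t mutex;
  pthread_cond_t  cond;
  int             ready;                      /* guards against spurious wakeups */
};

static void *worker(void *arg) {
  struct launcher *l = arg;
  pthread_mutex_lock(&l->mutex);
  l->ready = 1;                               /* "I have copied what I need from the launcher" */
  pthread_cond_signal(&l->cond);
  pthread_mutex_unlock(&l->mutex);
  /* ... long-running work happens here, like fork_run()'s vm_run loop ... */
  return NULL;
}

int main(void) {
  struct launcher l = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0 };
  pthread_t thread;
  pthread_mutex_lock(&l.mutex);               /* lock BEFORE creating the thread */
  pthread_create(&thread, NULL, worker, &l);
  while(!l.ready)                             /* cond_wait releases the mutex while waiting */
    pthread_cond_wait(&l.cond, &l.mutex);
  pthread_mutex_unlock(&l.mutex);
  puts("worker is up, launcher may return");  /* fork_launch() returns at this point */
  pthread_join(thread, NULL);
  return 0;
}

The explicit ready flag is the textbook guard against spurious wakeups; the Gwion code instead relies on the worker only being able to signal after the launcher has already released the mutex inside its wait, since the launcher holds that mutex when the thread is created.
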
ANN void fork_clean(const VM_Shred shred, const Vector v) {
  for(m_uint i = 0; i < vector_size(v); ++i) {
    const M_Object o = (M_Object)vector_at(v, i);
    if(!o)
      continue;
    stop(o);
  }
  for(m_uint i = 0; i < vector_size(v); ++i) {
    const M_Object o = (M_Object)vector_at(v, i);
    if(!o)
      continue;
    join(o);
  }
  for(m_uint i = 0; i < vector_size(v); ++i) {
    const M_Object o = (M_Object)vector_at(v, i);
    if(!o)
      continue;
    _release(o, shred);
  }
  vector_release(v);
  v->ptr = NULL;
}

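fork_clean runs three separate passes over the vector: first stop() every child so that each run loop sees is_running == 0 and the cancel flag, then join() every thread, and only then _release() the objects. Doing it in separate passes means no join has to wait behind a child that has not yet been told to stop. A generic sketch of that shutdown order, with plain pthreads and an atomic flag standing in for the shreduler state; all names here are illustrative.

/* Sketch only: stop all, then join all, then release, as fork_clean() does. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define N 4

struct child { pthread_t thread; atomic_int cancel; };

static void *child_run(void *arg) {
  struct child *c = arg;
  while(!atomic_load(&c->cancel)) { /* spin until asked to stop */ }
  return NULL;
}

int main(void) {
  struct child kids[N];
  for(int i = 0; i < N; ++i) {
    atomic_init(&kids[i].cancel, 0);
    pthread_create(&kids[i].thread, NULL, child_run, &kids[i]);
  }
  for(int i = 0; i < N; ++i)         /* pass 1: tell everyone to stop */
    atomic_store(&kids[i].cancel, 1);
  for(int i = 0; i < N; ++i)         /* pass 2: join everyone */
    pthread_join(kids[i].thread, NULL);
  /* pass 3 in fork_clean() releases the objects with _release() */
  puts("all children joined");
  return 0;
}
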
GWION_IMPORT(shred) {
  const Type t_shred = gwi_class_ini(gwi, "Shred", NULL);
  gwi_class_xtor(gwi, NULL, shred_dtor);

  gwi_item_ini(gwi, "@internal", "@me");
  GWI_BB(gwi_item_end(gwi, ae_flag_const, NULL))

  gwi_item_ini(gwi, "int", "cancel");
  GWI_BB((o_shred_cancel = gwi_item_end(gwi, ae_flag_const, NULL)))

  gwi_func_ini(gwi, "void", "exit");
  GWI_BB(gwi_func_end(gwi, gw_shred_exit, ae_flag_none))

  gwi_func_ini(gwi, "int", "running");
  GWI_BB(gwi_func_end(gwi, vm_shred_is_running, ae_flag_none))

  gwi_func_ini(gwi, "int", "done");
  GWI_BB(gwi_func_end(gwi, vm_shred_is_done, ae_flag_none))

  gwi_func_ini(gwi, "int", "id");
  GWI_BB(gwi_func_end(gwi, vm_shred_id, ae_flag_none))

  gwi_func_ini(gwi, "Shred", "fromId");
  gwi_func_arg(gwi, "int", "xid");
  GWI_BB(gwi_func_end(gwi, vm_shred_from_id, ae_flag_static))

  gwi_func_ini(gwi, "void", "yield");
  GWI_BB(gwi_func_end(gwi, shred_yield, ae_flag_none))

  gwi_func_ini(gwi, "int", "args");
  GWI_BB(gwi_func_end(gwi, shred_args, ae_flag_none))

  gwi_func_ini(gwi, "string", "arg");
  gwi_func_arg(gwi, "int", "n");
  GWI_BB(gwi_func_end(gwi, shred_arg, ae_flag_none))

  gwi_func_ini(gwi, "string", "name");
  GWI_BB(gwi_func_end(gwi, shred_name, ae_flag_none))

  gwi_func_ini(gwi, "string", "path");
  GWI_BB(gwi_func_end(gwi, shred_path, ae_flag_none))

  gwi_func_ini(gwi, "string", "dir");
  GWI_BB(gwi_func_end(gwi, shred_dir, ae_flag_none))

  gwi_func_ini(gwi, "string", "code_name");
  GWI_BB(gwi_func_end(gwi, shred_code_name, ae_flag_none))

  gwi_func_ini(gwi, "string", "code_path");
  GWI_BB(gwi_func_end(gwi, shred_code_path, ae_flag_none))

  gwi_func_ini(gwi, "string", "code_dir");
  GWI_BB(gwi_func_end(gwi, shred_code_dir, ae_flag_none))

  gwi_func_ini(gwi, "void", "set_cancel");
  gwi_func_arg(gwi, "int", "n");
  GWI_BB(gwi_func_end(gwi, shred_cancel, ae_flag_none))
  gwi_func_ini(gwi, "void", "test_cancel");
  GWI_BB(gwi_func_end(gwi, shred_test_cancel, ae_flag_none))
  gwi_func_ini(gwi, "void", "lock");
  GWI_BB(gwi_func_end(gwi, shred_lock, ae_flag_none))
  gwi_func_ini(gwi, "void", "unlock");
  GWI_BB(gwi_func_end(gwi, shred_unlock, ae_flag_none))
  gwi_func_ini(gwi, "float", "get_now");
  GWI_BB(gwi_func_end(gwi, shred_now, ae_flag_none))
  GWI_BB(gwi_class_end(gwi))
  gwi_set_global_type(gwi, t_shred, et_shred);

  struct SpecialId_ spid = { .type=t_shred, .exec=RegPushMe, .is_const=1 };
  gwi_specialid(gwi, "me", &spid);

  SET_FLAG(t_shred, abstract);

  const Type t_fork = gwi_class_ini(gwi, "Fork", "Shred");
  gwi_class_xtor(gwi, NULL, fork_dtor);
  gwi->gwion->type[et_fork] = t_fork;

  gwi_item_ini(gwi, "@internal", "@thread");
  GWI_BB((o_fork_thread = gwi_item_end(gwi, ae_flag_const, NULL)))
  gwi_item_ini(gwi, "@internal", "@cond");
  GWI_BB((o_fork_cond = gwi_item_end(gwi, ae_flag_const, NULL)))
  gwi_item_ini(gwi, "@internal", "@mutex");
  GWI_BB((o_fork_mutex = gwi_item_end(gwi, ae_flag_const, NULL)))
  gwi_item_ini(gwi, "int", "is_done");
  GWI_BB((o_fork_done = gwi_item_end(gwi, ae_flag_const, NULL)))
  gwi_item_ini(gwi, "Event", "ev");
  GWI_BB((o_fork_ev = gwi_item_end(gwi, ae_flag_const, NULL)))
  gwi_item_ini(gwi, "int", "retsize");
  GWI_BB((o_fork_retsize = gwi_item_end(gwi, ae_flag_const, NULL)))
  gwi_func_ini(gwi, "void", "join");
  GWI_BB(gwi_func_end(gwi, fork_join, ae_flag_none))
  gwi_func_ini(gwi, "void", "test_cancel");
  GWI_BB(gwi_func_end(gwi, fork_test_cancel, ae_flag_none))
  GWI_BB(gwi_class_end(gwi))
  SET_FLAG((t_fork), abstract);

  const Type t_typed = gwi_class_ini(gwi, "TypedFork<~A~>", "Fork");
  gwi_item_ini(gwi, "A", "retval");
  GWI_BB((gwi_item_end(gwi, ae_flag_const, NULL)))
  GWI_BB(gwi_class_end(gwi))
  SET_FLAG((t_typed), abstract);

  return GW_OK;
}

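For reference, every binding in GWION_IMPORT(shred) uses the same calling convention as the MFUN/SFUN callbacks above: member callbacks read their first user argument at MEM(SZ_INT) (the first slot appears to be reserved for the receiver, which is why shred_arg and shred_cancel both start there), the static vm_shred_from_id reads its argument at MEM(0), and results are written through RETURN. The method below is purely hypothetical, shown only to illustrate the convention; shred_twice and the "twice" binding are not part of the actual class.

/* Hypothetical example, following the conventions used throughout this file. */
static MFUN(shred_twice) {
  const m_int n = *(m_int*)MEM(SZ_INT);  /* first user argument, after the receiver slot */
  *(m_uint*)RETURN = (m_uint)(n * 2);    /* result goes back through RETURN */
}

/* It would be registered inside GWION_IMPORT(shred), next to the other bindings:
   gwi_func_ini(gwi, "int", "twice");
   gwi_func_arg(gwi, "int", "n");
   GWI_BB(gwi_func_end(gwi, shred_twice, ae_flag_none))
*/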