GCC Code Coverage Report

Directory: src/
File:      src/lib/shred.c
Date:      2020-10-03 10:30:04

            Exec   Total   Coverage
Lines:       269     277     97.1 %
Branches:     49      68     72.1 %

Source:
#include "gwion_util.h"
#include "gwion_ast.h"
#include "gwion_env.h"
#include "gwion_thread.h"
#include "vm.h"
#include "instr.h"
#include "object.h"
#include "shreduler_private.h"
#include "gwion.h"
#include "operator.h"
#include "import.h"
#include "emit.h"
#include "specialid.h"
#include "gwi.h"
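
/* Offsets of the extra data members declared on the Shred and Fork classes in
 * GWION_IMPORT(shred) below; the FORK_* macros read those members directly
 * from an object's data block. */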
static m_int o_fork_thread, o_fork_cond, o_fork_mutex, o_shred_cancel, o_fork_done, o_fork_ev, o_fork_retsize;

#define FORK_THREAD(o) *(THREAD_TYPE*)(o->data + o_fork_thread)
#define FORK_COND(o) *(THREAD_COND_TYPE*)(o->data + o_fork_cond)
#define FORK_MUTEX(o) *(MUTEX_TYPE*)(o->data + o_fork_mutex)
#define FORK_RETSIZE(o) *(m_int*)(o->data + o_fork_retsize)
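
/* Create a child shred that runs `code`, sharing the parent shred's base environment. */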
VM_Shred new_shred_base(const VM_Shred shred, const VM_Code code) {
  const VM_Shred sh = new_vm_shred(shred->info->mp, code);
  ADD_REF(code)
  sh->base = shred->base;
  return sh;
}
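
/* Wrap a VM_Shred in a Shred object so it can be handled from script code. */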
M_Object new_shred(const VM_Shred shred) {
  const M_Object obj = new_object(shred->info->mp, NULL,
    shred->info->vm->gwion->type[et_shred]);
  ME(obj) = shred;
  return obj;
}
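
/* Allocate a Fork object together with its completion Event and that event's
 * (initially empty) list of waiting shreds. */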
ANN static inline M_Object fork_object(const VM_Shred shred, const Type t) {
  const Gwion gwion = shred->info->vm->gwion;
  const M_Object o = new_object(gwion->mp, shred, t);
  *(M_Object*)(o->data + o_fork_ev) = new_object(gwion->mp, NULL, gwion->type[et_event]);
  EV_SHREDS(*(M_Object*)(o->data + o_fork_ev)) = new_vector(gwion->mp);
  return o;
}
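
/* A fork runs on its own copy of the VM: the child VM is created with
 * gwion_cpy(), keeps a pointer back to its parent, and the new shred is
 * scheduled on the child VM's shreduler rather than the caller's. */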
ANN M_Object new_fork(const VM_Shred shred, const VM_Code code, const Type t) {
  VM* parent = shred->info->vm;
  const VM_Shred sh = new_shred_base(shred, code);
  VM* vm = (sh->info->vm = gwion_cpy(parent));
  vm->parent = parent;
  const M_Object o = sh->info->me = fork_object(shred, t);
  ME(o) = sh;
  ++o->ref;
  shreduler_add(vm->shreduler, sh);
  return o;
}
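
/* Method bindings for the Shred class: exit(), id(), running(), done() and yield(). */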
static MFUN(gw_shred_exit) {
  const VM_Shred s = ME(o);
  s->mem -= SZ_INT;
  vm_shred_exit(s);
}

static MFUN(vm_shred_id) {
  const VM_Shred s = ME(o);
  *(m_int*)RETURN = s ? (m_int)s->tick->xid : -1;
}

static MFUN(vm_shred_is_running) {
  const VM_Shred s = ME(o);
  *(m_uint*)RETURN = (s->tick->next || s->tick->prev) ? 1 : 0;
}

static MFUN(vm_shred_is_done) {
  *(m_uint*)RETURN = ME(o) ? 0 : 1;
}

static MFUN(shred_yield) {
  const VM_Shred s = ME(o);
  const Shreduler sh = s->tick->shreduler;
  if(s != shred)
    shreduler_remove(sh, s, 0);
  shredule(sh, s, GWION_EPSILON);
}
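
/* Shred.fromId(): linear search of the shreduler's shred list for a matching id;
 * returns a null object when no shred has that id. */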
static SFUN(vm_shred_from_id) {
  const m_int index = *(m_int*)MEM(0);
  if(index > 0) {
    for(m_uint i = 0; i < vector_size(&shred->tick->shreduler->shreds); ++i) {
      const VM_Shred s = (VM_Shred)vector_at(&shred->tick->shreduler->shreds, i);
      if(s->tick->xid == (m_uint)index) {
        *(M_Object*)RETURN = s->info->me;
        return;
      }
    }
  }
  *(m_uint*)RETURN = 0;
}
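
/* Arguments attached to the shred: args() returns how many there are, arg(n)
 * returns the n-th one as a string, or a null object when it does not exist. */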
static MFUN(shred_args) {
  const VM_Shred s = ME(o);
  *(m_uint*)RETURN = s->info->args ? vector_size(s->info->args) : 0;
}

static MFUN(shred_arg) {
  const VM_Shred s = ME(o);
  const m_int idx = *(m_int*)MEM(SZ_INT);
  if(s->info->args && idx >= 0) {
    const m_str str = (m_str)vector_at(s->info->args, *(m_uint*)MEM(SZ_INT));
    *(M_Object*)RETURN = str ? new_string(shred->info->mp, shred, str) : NULL;
  } else
    *(m_uint*)RETURN = 0;
}
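
/* PATH_CHR is the platform path separator, used below to split off directory
 * names. describe_name() generates the `name` and `code_name` accessors; as an
 * illustration, describe_name(_code, s->code->name) expands roughly to
 *
 *   static MFUN(shred_code_name) {
 *     const VM_Shred s = ME(o);
 *     const m_str str = code_name((s->code->name), 0);
 *     *(m_uint*)RETURN = (m_uint)new_string(shred->info->mp, shred, str);
 *   }
 */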
#ifndef BUILD_ON_WINDOWS
#define PATH_CHR '/'
#else
#define PATH_CHR '\\'
#endif

#define describe_name(name, src) \
static MFUN(shred##name##_name) { \
  const VM_Shred s = ME(o); \
  const m_str str = code_name((src), 0); \
  *(m_uint*)RETURN = (m_uint)new_string(shred->info->mp, shred, str); \
}
describe_name(, s->info->name)
describe_name(_code, s->code->name)
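
/* Same pattern for the path and dir accessors: `*_path` returns the full path
 * from code_name(src, 1); `*_dir` copies that path and truncates it at the
 * last PATH_CHR, keeping only the directory part. */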
#define describe_path_and_dir(name, src) \
static MFUN(shred##name##_path) { \
  const VM_Shred s = ME(o); \
  const m_str str = code_name((src), 1); \
  *(m_uint*)RETURN = (m_uint)new_string(shred->info->mp, shred, str); \
} \
static MFUN(shred##name##_dir) { \
  const VM_Shred s = ME(o); \
  const m_str str = code_name((src), 1); \
  const size_t len = strlen(str); \
  char c[len + 1]; \
  strcpy(c, str); \
  size_t sz = len; \
  while(sz) { \
    if(c[sz] == PATH_CHR) { \
      c[sz] = 0; \
      break; \
    } \
    --sz; \
  } \
  *(m_uint*)RETURN = (m_uint)new_string(shred->info->mp, shred, c); \
}
describe_path_and_dir(, s->info->name)
describe_path_and_dir(_code, s->code->name)
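
/* Shred destructor plus the explicit lock()/unlock() bindings: freeing the
 * VM_Shred and the user-visible locking both go through the shreduler's mutex. */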
static DTOR(shred_dtor) {
  if(ME(o)) {
    MUTEX_TYPE mutex = ME(o)->tick->shreduler->mutex;
    MUTEX_LOCK(mutex);
    free_vm_shred(ME(o));
    MUTEX_UNLOCK(mutex);
  }
}

static MFUN(shred_lock) {
  MUTEX_LOCK(ME(o)->tick->shreduler->mutex);
}

static MFUN(shred_unlock) {
  MUTEX_UNLOCK(ME(o)->tick->shreduler->mutex);
}
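
/* Fork helpers: stop() flags the child VM as no longer running and sets the
 * cancel flag; join() waits for the fork's thread and clears the handle. */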
static void stop(const M_Object o) {
  VM *vm = ME(o)->info->vm;
  MUTEX_LOCK(vm->shreduler->mutex);
  vm->shreduler->bbq->is_running = 0;
  *(m_int*)(o->data + o_shred_cancel) = 1;
  MUTEX_UNLOCK(vm->shreduler->mutex);
}

static void join(const M_Object o) {
  if(FORK_THREAD(o)) {
    THREAD_JOIN(FORK_THREAD(o));
    FORK_THREAD(o) = 0;
  }
}
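
/* Fork destructor: mark the fork as done, stop its VM, drop it from the parent
 * gwion's child list, hand the child gwion instance to the parent's `child2`
 * vector (presumably for later cleanup by the parent), and release the
 * reference held on the fork's code. */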
static DTOR(fork_dtor) {
  *(m_int*)(o->data + o_fork_done) = 1;
  stop(o);
  VM *parent = ME(o)->info->vm->parent;
  MUTEX_LOCK(parent->shreduler->mutex);
  if(parent->gwion->data->child.ptr) {
    const m_int idx = vector_find(&parent->gwion->data->child, (vtype)o);
    if(idx > -1)
      VPTR(&parent->gwion->data->child, idx) = 0;
  }
  if(!parent->gwion->data->child2.ptr)
    vector_init(&parent->gwion->data->child2);
  vector_add(&parent->gwion->data->child2, (vtype)ME(o)->info->vm->gwion);
  REM_REF(ME(o)->code, ME(o)->info->vm->gwion);
  MUTEX_UNLOCK(parent->shreduler->mutex);
}
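
/* Fork.join() parks the calling shred on the fork's completion event unless the
 * fork is already done; set_cancel()/test_cancel() implement cooperative
 * cancellation for shreds and forks. */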
static MFUN(fork_join) {
  if(*(m_int*)(o->data + o_fork_done))
    return;
  shreduler_remove(shred->tick->shreduler, shred, 0);
  vector_add(EV_SHREDS(*(M_Object*)(o->data + o_fork_ev)), (vtype)shred);
}

static MFUN(shred_cancel) {
  MUTEX_LOCK(ME(o)->tick->shreduler->mutex);
  *(m_int*)(o->data + o_shred_cancel) = *(m_int*)MEM(SZ_INT);
  MUTEX_UNLOCK(ME(o)->tick->shreduler->mutex);
}

static MFUN(shred_test_cancel) {
  if(*(m_int*)(o->data + o_shred_cancel))
    vm_shred_exit(ME(o));
}

static MFUN(fork_test_cancel) {
  if(*(m_int*)(o->data + o_shred_cancel)) {
    stop(o);
    join(o);
    _release(o, ME(o));
    vm_shred_exit(ME(o));
  }
}
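
/* get_now(): walk up from this shred's VM to the root VM and return its
 * bbq->pos; forked child VMs advance a position counter of their own
 * (see fork_run below). */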
static MFUN(shred_now) {
  VM *vm = shred->info->vm;
  while(vm->parent)
    vm = vm->parent;
  *(m_float*)RETURN = vm->bbq->pos;
}
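
/* ThreadLauncher carries the mutex, condition variable and child VM into the
 * fork's thread; fork_running() checks, under the shreduler mutex, whether the
 * child VM should keep running. */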
struct ThreadLauncher {
  MUTEX_TYPE mutex;
  THREAD_COND_TYPE cond;
  VM *vm;
};

static inline int fork_running(VM *vm, const M_Object o) {
  MUTEX_LOCK(vm->shreduler->mutex);
  const int ret = vm->bbq->is_running && !*(m_int*)(o->data + o_shred_cancel);
  MUTEX_UNLOCK(vm->shreduler->mutex);
  return ret;
}
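
/* Thread entry point for a fork: signal FORK_COND so fork_launch() can return,
 * run the child VM until it stops or is cancelled, copy the return value into
 * the fork object's return-value slot, then mark the fork done and broadcast
 * its completion event (unless it was cancelled). */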
static ANN THREAD_FUNC(fork_run) {
  struct ThreadLauncher *tl = data;
  VM *vm = tl->vm;
  MUTEX_TYPE mutex = tl->mutex;
  const M_Object me = vm->shreduler->list->self->info->me;
  ++me->ref;
  MUTEX_COND_LOCK(mutex);
  THREAD_COND_SIGNAL(FORK_COND(me));
  MUTEX_COND_UNLOCK(mutex);
  while(fork_running(vm, me)) {
    vm_run(vm);
    ++vm->bbq->pos;
  }
  memcpy(me->data + vm->gwion->type[et_fork]->nspc->info->offset, *(m_bit**)ME(me)->reg, FORK_RETSIZE(me));
  gwion_end_child(ME(me), vm->gwion);
  MUTEX_LOCK(vm->parent->shreduler->mutex);
  *(m_int*)(me->data + o_fork_done) = 1;
  if(!*(m_int*)(me->data + o_shred_cancel))
    broadcast(*(M_Object*)(me->data + o_fork_ev));
  MUTEX_UNLOCK(vm->parent->shreduler->mutex);
  THREAD_RETURN(0);
}
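
/* Spawn the fork's thread and block on FORK_COND until fork_run() has signalled
 * that it is up, presumably so the stack-allocated ThreadLauncher is not read
 * after this function returns. */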
ANN void fork_launch(const M_Object o, const m_uint sz) {
  FORK_RETSIZE(o) = sz;
  MUTEX_SETUP(FORK_MUTEX(o));
  THREAD_COND_SETUP(FORK_COND(o));
  struct ThreadLauncher tl = { .mutex=FORK_MUTEX(o), .cond=FORK_COND(o), .vm=ME(o)->info->vm };
  MUTEX_COND_LOCK(tl.mutex);
  THREAD_CREATE(FORK_THREAD(o), fork_run, &tl);
  THREAD_COND_WAIT(FORK_COND(o), tl.mutex);
  MUTEX_COND_UNLOCK(tl.mutex);
  THREAD_COND_CLEANUP(FORK_COND(o));
  MUTEX_CLEANUP(FORK_MUTEX(o));
}
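
/* Clean up a vector of forks in three passes: stop them all, join all their
 * threads, then release the objects, and finally drop the vector itself. */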
ANN void fork_clean(const VM_Shred shred, const Vector v) {
  for(m_uint i = 0; i < vector_size(v); ++i) {
    const M_Object o = (M_Object)vector_at(v, i);
    if(!o)
      continue;
    stop(o);
  }
  for(m_uint i = 0; i < vector_size(v); ++i) {
    const M_Object o = (M_Object)vector_at(v, i);
    if(!o)
      continue;
    join(o);
  }
  for(m_uint i = 0; i < vector_size(v); ++i) {
    const M_Object o = (M_Object)vector_at(v, i);
    if(!o)
      continue;
    _release(o, shred);
  }
  vector_release(v);
  v->ptr = NULL;
}
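
/* Type registration: builds the abstract Shred class (with the `me` special
 * identifier), the Fork class on top of it, and the generic TypedFork:[A]
 * class that adds the typed `retval` member. */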
GWION_IMPORT(shred) {
  const Type t_shred = gwi_class_ini(gwi, "Shred", NULL);
  gwi_class_xtor(gwi, NULL, shred_dtor);

  gwi_item_ini(gwi, "@internal", "@me");
  GWI_BB(gwi_item_end(gwi, ae_flag_const, NULL))

  gwi_item_ini(gwi, "int", "cancel");
  GWI_BB((o_shred_cancel = gwi_item_end(gwi, ae_flag_const, NULL)))

  gwi_func_ini(gwi, "void", "exit");
  GWI_BB(gwi_func_end(gwi, gw_shred_exit, ae_flag_none))

  gwi_func_ini(gwi, "int", "running");
  GWI_BB(gwi_func_end(gwi, vm_shred_is_running, ae_flag_none))

  gwi_func_ini(gwi, "int", "done");
  GWI_BB(gwi_func_end(gwi, vm_shred_is_done, ae_flag_none))

  gwi_func_ini(gwi, "int", "id");
  GWI_BB(gwi_func_end(gwi, vm_shred_id, ae_flag_none))

  gwi_func_ini(gwi, "Shred", "fromId");
  gwi_func_arg(gwi, "int", "xid");
  GWI_BB(gwi_func_end(gwi, vm_shred_from_id, ae_flag_static))

  gwi_func_ini(gwi, "void", "yield");
  GWI_BB(gwi_func_end(gwi, shred_yield, ae_flag_none))

  gwi_func_ini(gwi, "int", "args");
  GWI_BB(gwi_func_end(gwi, shred_args, ae_flag_none))

  gwi_func_ini(gwi, "string", "arg");
  gwi_func_arg(gwi, "int", "n");
  GWI_BB(gwi_func_end(gwi, shred_arg, ae_flag_none))

  gwi_func_ini(gwi, "string", "name");
  GWI_BB(gwi_func_end(gwi, shred_name, ae_flag_none))

  gwi_func_ini(gwi, "string", "path");
  GWI_BB(gwi_func_end(gwi, shred_path, ae_flag_none))

  gwi_func_ini(gwi, "string", "dir");
  GWI_BB(gwi_func_end(gwi, shred_dir, ae_flag_none))

  gwi_func_ini(gwi, "string", "code_name");
  GWI_BB(gwi_func_end(gwi, shred_code_name, ae_flag_none))

  gwi_func_ini(gwi, "string", "code_path");
  GWI_BB(gwi_func_end(gwi, shred_code_path, ae_flag_none))

  gwi_func_ini(gwi, "string", "code_dir");
  GWI_BB(gwi_func_end(gwi, shred_code_dir, ae_flag_none))

  gwi_func_ini(gwi, "void", "set_cancel");
  gwi_func_arg(gwi, "int", "n");
  GWI_BB(gwi_func_end(gwi, shred_cancel, ae_flag_none))
  gwi_func_ini(gwi, "void", "test_cancel");
  GWI_BB(gwi_func_end(gwi, shred_test_cancel, ae_flag_none))
  gwi_func_ini(gwi, "void", "lock");
  GWI_BB(gwi_func_end(gwi, shred_lock, ae_flag_none))
  gwi_func_ini(gwi, "void", "unlock");
  GWI_BB(gwi_func_end(gwi, shred_unlock, ae_flag_none))
  gwi_func_ini(gwi, "float", "get_now");
  GWI_BB(gwi_func_end(gwi, shred_now, ae_flag_none))
  GWI_BB(gwi_class_end(gwi))
  gwi_set_global_type(gwi, t_shred, et_shred);

  struct SpecialId_ spid = { .type=t_shred, .exec=RegPushMe, .is_const=1 };
  gwi_specialid(gwi, "me", &spid);

  SET_FLAG(t_shred, abstract);

  const Type t_fork = gwi_class_ini(gwi, "Fork", "Shred");
  gwi_class_xtor(gwi, NULL, fork_dtor);
  gwi->gwion->type[et_fork] = t_fork;

  gwi_item_ini(gwi, "@internal", "@thread");
  GWI_BB((o_fork_thread = gwi_item_end(gwi, ae_flag_const, NULL)))
  gwi_item_ini(gwi, "@internal", "@cond");
  GWI_BB((o_fork_cond = gwi_item_end(gwi, ae_flag_const, NULL)))
  gwi_item_ini(gwi, "@internal", "@mutex");
  GWI_BB((o_fork_mutex = gwi_item_end(gwi, ae_flag_const, NULL)))
  gwi_item_ini(gwi, "int", "is_done");
  GWI_BB((o_fork_done = gwi_item_end(gwi, ae_flag_const, NULL)))
  gwi_item_ini(gwi, "Event", "ev");
  GWI_BB((o_fork_ev = gwi_item_end(gwi, ae_flag_const, NULL)))
  gwi_item_ini(gwi, "int", "retsize");
  GWI_BB((o_fork_retsize = gwi_item_end(gwi, ae_flag_const, NULL)))
  gwi_func_ini(gwi, "void", "join");
  GWI_BB(gwi_func_end(gwi, fork_join, ae_flag_none))
  gwi_func_ini(gwi, "void", "test_cancel");
  GWI_BB(gwi_func_end(gwi, fork_test_cancel, ae_flag_none))
  GWI_BB(gwi_class_end(gwi))
  SET_FLAG((t_fork), abstract);

  const Type t_typed = gwi_class_ini(gwi, "TypedFork:[A]", "Fork");
  gwi_item_ini(gwi, "A", "retval");
  GWI_BB((gwi_item_end(gwi, ae_flag_const, NULL)))
  GWI_BB(gwi_class_end(gwi))
  SET_FLAG((t_typed), abstract);

  return GW_OK;
}