/*
 * This is a part of PolyORB-HI-C distribution, a minimal
 * middleware written for generated code from AADL models.
 * You should use it with the Ocarina toolsuite.
 *
 * For more information, please visit http://ocarina.enst.fr
 *
 * Copyright (C) 2007-2011, European Space Agency (ESA).
 */

#if defined (RTEMS_POSIX) || defined (POSIX)
#include <pthread.h>
#include <sched.h>
#endif

#if defined (XENO_POSIX) || defined (XENO_NATIVE)
#include <sys/mman.h>   /* for mlockall() */
#endif

/* Headers from the executive */
#include <errno.h>

/* Header files in PolyORB-HI */
#include <po_hi_config.h>
#include <po_hi_time.h>
#include <po_hi_task.h>
#include <po_hi_debug.h>
#include <po_hi_returns.h>
#include <po_hi_types.h>

/* Header files from generated code */
#include <deployment.h>

int nb_tasks; /* number of created tasks */

/*
 * Structure of a task, contains platform-dependent members
 */
typedef struct
{
  __po_hi_task_id     id;       /* Identifier of the task in the system */
  __po_hi_time_t      period;
#if defined(RTEMS_POSIX) || defined(POSIX)
  __po_hi_time_t      timer;
  pthread_t           tid;              /* The pthread_t type used by the
                                           POSIX library */
  pthread_mutex_t     mutex;
  pthread_cond_t      cond;
#elif defined(RTEMS_PURE)
  rtems_id            ratemon_period;
  rtems_id            rtems_id;
#elif defined(XENO_NATIVE)
  RT_TASK             xeno_id;
#endif
} __po_hi_task_t;

__po_hi_task_t tasks[__PO_HI_NB_TASKS];
/* Array which contains all task information */

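/*
 * Block the calling thread until the application tasks have completed:
 * on POSIX targets we join every created thread, on bare RTEMS the
 * calling task suspends itself. On Xenomai builds the address space is
 * locked in RAM first (see mlockall below).
 */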
void __po_hi_wait_for_tasks ()
{
#if defined (XENO_POSIX) || defined (XENO_NATIVE)
   /*
    * Once initialization has been done, we lock the address space in
    * RAM to avoid any paging operation that could introduce
    * non-deterministic timing behavior.
    */
   mlockall(MCL_CURRENT|MCL_FUTURE);
#endif

#if defined(RTEMS_POSIX) || defined(POSIX)
  int i;

  for (i = 0; i < __PO_HI_NB_TASKS; i++)
    {
      pthread_join( tasks[i].tid , NULL );
    }
#endif
#ifdef RTEMS_PURE
  rtems_task_suspend(RTEMS_SELF);
#endif
}

/*
 * Compute the next period for a task.
 * The argument is the task id.
 * The task must be a periodic task.
 */
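/*
 * On POSIX targets the next dispatch time is stored in
 * tasks[task].timer; on RTEMS the period is handled by a
 * rate-monotonic server created on the first call.
 */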
int __po_hi_compute_next_period (__po_hi_task_id task)
{

#if defined(RTEMS_POSIX) || defined(POSIX)
  __po_hi_time_t mytime;

  if (__po_hi_get_time (&mytime) != __PO_HI_SUCCESS)
    {
      return (__PO_HI_ERROR_CLOCK);
    }
  tasks[task].timer = __po_hi_add_times( mytime, tasks[task].period );
  
  return (__PO_HI_SUCCESS);
#elif defined (RTEMS_PURE)
   rtems_status_code ret;
   rtems_name name;

   if (tasks[task].ratemon_period == RTEMS_INVALID_ID)
   {
      name = rtems_build_name ('P', 'R', 'D' + (char)task, ' ');

      __DEBUGMSG ("Create monotonic server for task %d\n", task);
      ret = rtems_rate_monotonic_create (name, &(tasks[task].ratemon_period));
      if (ret != RTEMS_SUCCESSFUL)
      {
         __DEBUGMSG ("Error while creating the monotonic server, task=%d, status=%d\n", task, ret);
      }
   }
  return (__PO_HI_SUCCESS);
#else
   return (__PO_HI_UNAVAILABLE);
#endif
}

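/*
 * Block the calling task until its next period: on POSIX targets we
 * sleep until the absolute date stored in tasks[task].timer and then
 * recompute it, on RTEMS we rely on the rate-monotonic server, and on
 * Xenomai (native skin) on rt_task_wait_period().
 */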
int __po_hi_wait_for_next_period (__po_hi_task_id task)
{
#if defined (POSIX) || defined (RTEMS_POSIX) || defined (XENO_POSIX)
  int ret;
  __po_hi_task_delay_until (tasks[task].timer, task);
  if ( (ret = __po_hi_compute_next_period (task)) != __PO_HI_SUCCESS)
    {
      return (__PO_HI_ERROR_CLOCK);
    }

  return (__PO_HI_SUCCESS);
#elif defined (RTEMS_PURE)
   rtems_status_code ret;
/*   ret = rtems_rate_monotonic_period (&tasks[task].ratemon_period, (rtems_interval)tasks[task].period * ); */
   ret = rtems_rate_monotonic_period (tasks[task].ratemon_period, tasks[task].period / _TOD_Microseconds_per_tick); 

   switch (ret)
   {
      case RTEMS_SUCCESSFUL:
         return (__PO_HI_SUCCESS);
         break;
      case RTEMS_TIMEOUT:
         __DEBUGMSG ("Error in rtems_rate_monotonic_period (TIMEOUT, task = %d)\n", task);
         return (__PO_HI_ERROR_TASK_PERIOD);
         break;
      default:
         __DEBUGMSG ("Error in rtems_rate_monotonic_period (unknown, error code=%d, task=%d)\n", ret, task);
         return (__PO_HI_ERROR_UNKNOWN);
         break;
   }

   return (__PO_HI_UNAVAILABLE);
#elif defined (XENO_NATIVE)
   if (rt_task_wait_period (NULL) != 0)
   {
      return (__PO_HI_ERROR_TASK_PERIOD);
   }

   return (__PO_HI_SUCCESS);
#else
  return (__PO_HI_UNAVAILABLE);
#endif
}

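/*
 * Reset every entry of the tasks array before any task is created.
 */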
int __po_hi_initialize_tasking( )
{
  int i;

  for (i = 0; i < __PO_HI_NB_TASKS; i++)
  {
     tasks[i].period = 0;
     tasks[i].id     = invalid_task_id; 
#ifdef RTEMS_PURE
      tasks[i].ratemon_period = RTEMS_INVALID_ID;
#endif
  }

  nb_tasks = 0;

  return (__PO_HI_SUCCESS);
}

/*
 * For each kind of system, we declare a generic function that
 * creates a thread. For POSIX-compliant systems, the function
 * is called __po_hi_posix_create_thread.
 */

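/*
 * The equivalent constructors for the other targets are
 * __po_hi_rtems_create_thread (RTEMS) and
 * __po_hi_xenomai_create_thread (Xenomai native skin); see below.
 */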
#if defined (POSIX) || defined (RTEMS_POSIX) || defined (XENO_POSIX)
pthread_t __po_hi_posix_create_thread (__po_hi_priority_t priority, 
                                       __po_hi_stack_t    stack_size,
				       void*              (*start_routine)(void))
{
  int                policy;
  pthread_t          tid;
  pthread_attr_t     attr;
  struct sched_param param;

  if (pthread_attr_init (&attr) != 0)
    {
      return ((pthread_t)__PO_HI_ERROR_PTHREAD_ATTR);
    }

#if defined (POSIX) || defined (XENO_POSIX)
  if (pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM) != 0)
    {
      return ((pthread_t)__PO_HI_ERROR_PTHREAD_ATTR);
    }
  if (stack_size != 0)
    {
      if (pthread_attr_setstacksize (&attr, stack_size) != 0)
        {
          return ((pthread_t)__PO_HI_ERROR_PTHREAD_ATTR);
        }
    }
#elif defined (RTEMS_POSIX)
  if (pthread_attr_setscope (&attr, PTHREAD_SCOPE_PROCESS) != 0)
  {
    return ((pthread_t)__PO_HI_ERROR_PTHREAD_ATTR);
  }
#endif

  if (pthread_create (&tid, &attr, (void* (*)(void*))start_routine, NULL) != 0)
    {
      return ((pthread_t)__PO_HI_ERROR_PTHREAD_CREATE);
    }

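  /*
   * The thread is scheduled with the round-robin real-time policy
   * (SCHED_RR); the priority requested by the caller is applied
   * below with pthread_setschedparam().
   */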
  policy = SCHED_RR;
  param.sched_priority = priority;

#ifdef __PO_HI_DEBUG
  if (priority < sched_get_priority_min (policy))
  {
      __DEBUGMSG("PRIORITY IS TOO LOW\n");
  }

  if (priority > sched_get_priority_max (policy))
  {
      __DEBUGMSG("PRIORITY IS TOO HIGH\n");
  }
#endif

  /*
   * Warn the user that he may need to be root: on many systems,
   * pthread_setschedparam() fails when the caller lacks the
   * privileges required to change thread priorities.
   */

  if (pthread_setschedparam (tid, policy, &param)!=0)
    {
#ifdef __PO_HI_DEBUG
      __DEBUGMSG("CANNOT SET PRIORITY FOR TASK %d\n" , nb_tasks );
      __DEBUGMSG("IF YOU ARE USING POSIX IMPLEMENTATION\n");
      __DEBUGMSG("BE SURE TO BE LOGGED AS ROOT\n");
#endif
    }

  return tid;
}

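/*
 * Initialize the mutex and condition variable a task uses in
 * __po_hi_task_delay_until() to sleep until its next dispatch time.
 */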
int __po_hi_posix_initialize_task (__po_hi_task_t* task)
{
        if (pthread_mutex_init (&(task->mutex), NULL) != 0)
        {
                return (__PO_HI_ERROR_PTHREAD_MUTEX);
        }

        if (pthread_cond_init (&(task->cond), NULL) != 0)
        {
                return (__PO_HI_ERROR_PTHREAD_COND);
        }
        return (__PO_HI_SUCCESS);
}

#endif /* POSIX || RTEMS_POSIX || XENO_POSIX */


#ifdef RTEMS_PURE
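/*
 * Note: the priority and stack_size parameters are currently not
 * forwarded to rtems_task_create(); the task is created with
 * priority 1 and RTEMS_MINIMUM_STACK_SIZE.
 */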
rtems_id __po_hi_rtems_create_thread (__po_hi_priority_t priority, 
                                      __po_hi_stack_t    stack_size,
                                      void*              (*start_routine)(void))
{
  rtems_id rid;
   if (rtems_task_create (rtems_build_name ('T', 'A', nb_tasks, ' '),
                          1,
                          RTEMS_MINIMUM_STACK_SIZE,
                          RTEMS_DEFAULT_MODES,
                          RTEMS_DEFAULT_ATTRIBUTES | RTEMS_FLOATING_POINT,
                          &rid) != RTEMS_SUCCESSFUL)
   {
      __DEBUGMSG ("ERROR when creating the task\n");
      return __PO_HI_ERROR_CREATE_TASK;
   }

  if (rtems_task_start (rid, (rtems_task_entry)start_routine, 0 ) != RTEMS_SUCCESSFUL)
  {
      __DEBUGMSG ("ERROR when starting the task\n");
      return __PO_HI_ERROR_CREATE_TASK;
  }

   return rid;
}
#endif

#ifdef XENO_NATIVE
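/*
 * Create an anonymous Xenomai (native skin) task and start it
 * immediately on start_routine.
 */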
RT_TASK __po_hi_xenomai_create_thread (__po_hi_priority_t priority, 
                                     __po_hi_stack_t    stack_size,
                                     void*              (*start_routine)(void))
{
   RT_TASK newtask;

   if (rt_task_create (&newtask, NULL, stack_size, priority, 0) != 0)
   {
      __DEBUGMSG ("ERROR when creating the task\n");
   }
   if (rt_task_start (&newtask, (void*)start_routine, NULL) != 0)
   {
      __DEBUGMSG ("ERROR when starting the task\n");
   }

   return newtask;
}
#endif





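/*
 * Create a task on the current platform. An id of -1 means the thread
 * is not registered in the tasks array: it is created and its handle
 * is discarded. Otherwise the entry tasks[id] is filled in and
 * nb_tasks is incremented.
 */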
int __po_hi_create_generic_task (__po_hi_task_id    id, 
                                 __po_hi_time_t     period, 
                                 __po_hi_priority_t priority, 
                                 __po_hi_stack_t   stack_size,
                                 void*              (*start_routine)(void))
{
  __po_hi_task_t* my_task;
  if (id == -1) 
    {
#if defined (POSIX) || defined (RTEMS_POSIX) || defined (XENO_POSIX)
      __po_hi_posix_create_thread (priority, stack_size, start_routine);
      return (__PO_HI_SUCCESS);
#elif defined (XENO_NATIVE)
      __po_hi_xenomai_create_thread (priority, stack_size, start_routine);
      return (__PO_HI_SUCCESS);
#elif defined (RTEMS_PURE)
      __po_hi_rtems_create_thread (priority, stack_size, start_routine);
      return (__PO_HI_SUCCESS);
#else
      return (__PO_HI_UNAVAILABLE);
#endif
    } 
  else
    {
      my_task         = &(tasks[id]);
      my_task->period = period;
      my_task->id     = id;
     
#if defined (POSIX) || defined (RTEMS_POSIX)
      my_task->tid    = __po_hi_posix_create_thread (priority, stack_size, start_routine);
      __po_hi_posix_initialize_task (my_task);
#elif defined (RTEMS_PURE)
      my_task->rtems_id = __po_hi_rtems_create_thread (priority, stack_size, start_routine);
#elif defined (XENO_NATIVE)
      my_task->xeno_id = __po_hi_xenomai_create_thread (priority, stack_size, start_routine);
#else
      return (__PO_HI_UNAVAILABLE);
#endif
      nb_tasks++;
    }

  return (__PO_HI_SUCCESS);
}

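/*
 * Typical usage from Ocarina-generated code (a sketch with hypothetical
 * names, not part of this file): the generated main creates the task,
 * whose body loops on __po_hi_wait_for_next_period.
 *
 *    void* producer_job (void)
 *    {
 *       while (1)
 *       {
 *          do_some_work ();                 hypothetical user code
 *          __po_hi_wait_for_next_period (producer_k);
 *       }
 *       return NULL;
 *    }
 *
 *    __po_hi_create_periodic_task
 *       (producer_k, producer_period, producer_priority, 0, producer_job);
 */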
int __po_hi_create_periodic_task (__po_hi_task_id    id, 
				  __po_hi_time_t     period, 
				  __po_hi_priority_t priority, 
				  __po_hi_stack_t    stack_size,
				  void*              (*start_routine)(void))
{
  if (__po_hi_create_generic_task( id, period , priority , stack_size, start_routine ) != __PO_HI_SUCCESS)
    {
      return (__PO_HI_ERROR_CREATE_TASK);
    }

  /*
   * Compute the next period of the task, using the
   * __po_hi_time_* functions.
   */
#if defined (RTEMS_POSIX) || defined (POSIX) || defined (XENO_POSIX)
  if (__po_hi_compute_next_period (id) != __PO_HI_SUCCESS)
    {
      return (__PO_HI_ERROR_CLOCK);
    }
#elif defined (XENO_NATIVE)
   if (rt_task_set_periodic (&(tasks[id].xeno_id), TM_NOW, tasks[id].period * 1000000) != 0)
   {
      return (__PO_HI_ERROR_CLOCK);
   }
#endif
    
  return (__PO_HI_SUCCESS);
}

int __po_hi_create_sporadic_task (__po_hi_task_id    id,
				  __po_hi_time_t     period, 
				  __po_hi_priority_t priority, 
				  __po_hi_stack_t    stack_size,
				  void*              (*start_routine)(void) )
{
  /*
   * Create generic task which will execute the routine given in the
   * last parameter. Typically, a sporadic thread will wait on a
   * mutex.
   */
  if (__po_hi_create_generic_task( id, period , priority , stack_size, start_routine ) != __PO_HI_SUCCESS)
    {
      return (__PO_HI_ERROR_CREATE_TASK);
    }
  
  return (__PO_HI_SUCCESS);
}

int __po_hi_task_delay_until (__po_hi_time_t time, __po_hi_task_id task)
{
#if defined (POSIX) || defined (RTEMS_POSIX)
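  /*
   * The date is a __po_hi_time_t expressed in microseconds; convert it
   * to an absolute struct timespec and block on the task condition
   * variable until that date is reached (ETIMEDOUT) or the task is
   * signalled.
   */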
  struct timespec timer;
  int ret;

  timer.tv_sec = time / 1000000;
  
  timer.tv_nsec = (time - (timer.tv_sec*1000000)) * 1000;

  pthread_mutex_lock (&tasks[task].mutex);
  
  ret = pthread_cond_timedwait (&tasks[task].cond, &tasks[task].mutex, &timer);

  if ( (ret != 0) && (ret != ETIMEDOUT))
    {
      ret = __PO_HI_ERROR_PTHREAD_COND;
    }
  else
    {
      ret = __PO_HI_SUCCESS;
    }

  pthread_mutex_unlock (&tasks[task].mutex);

  return (ret);
#endif
  return (__PO_HI_UNAVAILABLE);
}

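/*
 * Delete (RTEMS) or cancel (POSIX) every task of the application.
 */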
void __po_hi_tasks_killall ()
{
   int i;
   for (i = 0; i < __PO_HI_NB_TASKS; i++)
    {
       __DEBUGMSG ("Kill task %d\n", i);
#ifdef RTEMS_PURE
      rtems_task_delete (tasks[i].rtems_id);
#endif
#if defined (POSIX) || defined (RTEMS_POSIX)
      pthread_cancel (tasks[i].tid);
      __DEBUGMSG ("[TASKS] Cancel thread %d\n", (int) tasks[i].tid);
#endif
    }
}