include/nucleus/pod.h

#ifndef _XENO_NUCLEUS_POD_H
#define _XENO_NUCLEUS_POD_H

#include <nucleus/sched.h>

/* Pod status flags */
#define XNFATAL  0x00000001     /* Fatal error in progress */
#define XNPEXEC  0x00000002     /* Pod is active (a skin is attached) */

/* These flags are available to the real-time interfaces */
#define XNPOD_SPARE0  0x01000000
#define XNPOD_SPARE1  0x02000000
#define XNPOD_SPARE2  0x04000000
#define XNPOD_SPARE3  0x08000000
#define XNPOD_SPARE4  0x10000000
#define XNPOD_SPARE5  0x20000000
#define XNPOD_SPARE6  0x40000000
#define XNPOD_SPARE7  0x80000000

#define XNPOD_NORMAL_EXIT  0x0
#define XNPOD_FATAL_EXIT   0x1

#define XNPOD_ALL_CPUS  XNARCH_CPU_MASK_ALL

#define XNPOD_FATAL_BUFSZ  16384

#define nkpod (&nkpod_struct)

struct xnsynch;

/* Real-time pod descriptor. */
struct xnpod {

        xnflags_t status;       /*!< Status bitmask. */

        xnsched_t sched[XNARCH_NR_CPUS];        /*!< Per-CPU scheduler slots. */

        xnqueue_t threadq;      /*!< All existing threads. */
#ifdef CONFIG_XENO_OPT_VFILE
        struct xnvfile_rev_tag threadlist_tag;
#endif
        xnqueue_t tstartq,      /*!< Thread start hook queue. */
         tswitchq,              /*!< Thread switch hook queue. */
         tdeleteq;              /*!< Thread delete hook queue. */

        atomic_counter_t timerlck;      /*!< Timer lock depth. */

        xntimer_t tslicer;      /*!< Round-robin time-slicing timer. */
        int tsliced;            /*!< Number of threads using the slicer. */

        int refcnt;             /*!< Reference count. */

#ifdef __XENO_SIM__
        void (*schedhook) (xnthread_t *thread, xnflags_t mask); /*!< Internal scheduling hook. */
#endif  /* __XENO_SIM__ */
};

typedef struct xnpod xnpod_t;

DECLARE_EXTERN_XNLOCK(nklock);

extern u_long nklatency;

extern u_long nktimerlat;

extern xnarch_cpumask_t nkaffinity;

extern xnpod_t nkpod_struct;

#ifdef CONFIG_XENO_OPT_VFILE
int xnpod_init_proc(void);
void xnpod_cleanup_proc(void);
#else /* !CONFIG_XENO_OPT_VFILE */
static inline int xnpod_init_proc(void) { return 0; }
static inline void xnpod_cleanup_proc(void) {}
#endif /* !CONFIG_XENO_OPT_VFILE */

static inline int xnpod_mount(void)
{
        xnsched_register_classes();
        return xnpod_init_proc();
}

static inline void xnpod_umount(void)
{
        xnpod_cleanup_proc();
}

#ifdef __cplusplus
extern "C" {
#endif

int __xnpod_set_thread_schedparam(struct xnthread *thread,
                                  struct xnsched_class *sched_class,
                                  const union xnsched_policy_param *sched_param,
                                  int propagate);

void __xnpod_reset_thread(struct xnthread *thread);

#ifdef CONFIG_XENO_HW_FPU
void xnpod_switch_fpu(xnsched_t *sched);
#endif /* CONFIG_XENO_HW_FPU */

void __xnpod_schedule(struct xnsched *sched);

        /* -- Beginning of the exported interface */

#define xnpod_sched_slot(cpu) \
    (&nkpod->sched[cpu])

#define xnpod_current_sched() \
    xnpod_sched_slot(xnarch_current_cpu())

#define xnpod_active_p() \
    testbits(nkpod->status, XNPEXEC)

#define xnpod_fatal_p() \
    testbits(nkpod->status, XNFATAL)

#define xnpod_interrupt_p() \
    testbits(xnpod_current_sched()->lflags, XNINIRQ)

#define xnpod_callout_p() \
    testbits(xnpod_current_sched()->status, XNKCOUT)

#define xnpod_asynch_p() \
        ({                                                              \
                xnsched_t *sched = xnpod_current_sched();               \
                testbits(sched->status | sched->lflags, XNKCOUT|XNINIRQ); \
        })

#define xnpod_current_thread() \
    (xnpod_current_sched()->curr)

#define xnpod_current_root() \
    (&xnpod_current_sched()->rootcb)

#ifdef CONFIG_XENO_OPT_PERVASIVE
#define xnpod_current_p(thread)                                         \
    ({ int __shadow_p = xnthread_test_state(thread, XNSHADOW);          \
       int __curr_p = __shadow_p ? xnshadow_thread(current) == thread   \
           : thread == xnpod_current_thread();                          \
       __curr_p;})
#else
#define xnpod_current_p(thread) \
    (xnpod_current_thread() == (thread))
#endif

#define xnpod_locked_p() \
    xnthread_test_state(xnpod_current_thread(), XNLOCK)

#define xnpod_unblockable_p() \
    (xnpod_asynch_p() || xnthread_test_state(xnpod_current_thread(), XNROOT))

#define xnpod_root_p() \
    xnthread_test_state(xnpod_current_thread(),XNROOT)

#define xnpod_shadow_p() \
    xnthread_test_state(xnpod_current_thread(),XNSHADOW)

#define xnpod_userspace_p() \
    xnthread_test_state(xnpod_current_thread(),XNROOT|XNSHADOW)

#define xnpod_primary_p() \
    (!(xnpod_asynch_p() || xnpod_root_p()))

#define xnpod_secondary_p()     xnpod_root_p()

#define xnpod_idle_p()          xnpod_root_p()
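
The predicates above are how nucleus and skin code checks its execution context before acting. As an illustration only (my_skin_sleep and the kernel errno values are hypothetical, not part of this header), a blocking service would typically be guarded like this, using the same suspension call that xnpod_delay() wraps further down:

static int my_skin_sleep(xnticks_t timeout)
{
        if (!xnpod_active_p())
                return -ENOSYS; /* no skin is attached yet */

        if (xnpod_unblockable_p())
                return -EPERM;  /* ISR, callout or root context: cannot sleep */

        xnpod_suspend_thread(xnpod_current_thread(), XNDELAY,
                             timeout, XN_RELATIVE, NULL);
        return 0;
}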

int xnpod_init(void);

int xnpod_enable_timesource(void);

void xnpod_disable_timesource(void);

void xnpod_shutdown(int xtype);
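
xnpod_init(), xnpod_enable_timesource() and xnpod_shutdown() bracket the life of the pod. A minimal sketch of how a skin module might use them; my_skin_init/my_skin_exit are hypothetical, and real skins set up time bases, registries and other resources between these calls:

static int my_skin_init(void)
{
        int err = xnpod_init();          /* attach the skin, pod becomes active */
        if (err)
                return err;

        err = xnpod_enable_timesource(); /* arm the core time source */
        if (err)
                xnpod_shutdown(XNPOD_NORMAL_EXIT);

        return err;
}

static void my_skin_exit(void)
{
        xnpod_shutdown(XNPOD_NORMAL_EXIT);
}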

int xnpod_init_thread(struct xnthread *thread,
                      const struct xnthread_init_attr *attr,
                      struct xnsched_class *sched_class,
                      const union xnsched_policy_param *sched_param);

int xnpod_start_thread(xnthread_t *thread,
                       const struct xnthread_start_attr *attr);
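
Bringing a nucleus thread to life is a two-step operation: xnpod_init_thread() attaches the thread control block to the pod, xnpod_start_thread() makes it runnable. The sketch below is illustrative only; it assumes the xnthread_init_attr/xnthread_start_attr field names from nucleus/thread.h, the xnsched_class_rt class from nucleus/sched.h and the master time base nktbase, none of which are declared in this file:

static void my_thread_body(void *cookie)
{
        for (;;)
                xnpod_delay(10000); /* relative wait, see xnpod_delay() below */
}

static int my_spawn(xnthread_t *thread)
{
        struct xnthread_init_attr iattr;
        struct xnthread_start_attr sattr;
        union xnsched_policy_param param;
        int err;

        memset(&iattr, 0, sizeof(iattr));
        iattr.tbase = &nktbase;  /* master time base (assumed) */
        iattr.name = "demo";
        iattr.flags = 0;
        iattr.stacksize = 0;     /* pick the default stack size */

        param.rt.prio = 10;
        err = xnpod_init_thread(thread, &iattr, &xnsched_class_rt, &param);
        if (err)
                return err;

        memset(&sattr, 0, sizeof(sattr));
        sattr.affinity = nkaffinity; /* default CPU affinity, declared above */
        sattr.entry = my_thread_body;
        sattr.cookie = NULL;

        return xnpod_start_thread(thread, &sattr);
}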

void xnpod_stop_thread(xnthread_t *thread);

void xnpod_restart_thread(xnthread_t *thread);

void xnpod_delete_thread(xnthread_t *thread);

void xnpod_abort_thread(xnthread_t *thread);

xnflags_t xnpod_set_thread_mode(xnthread_t *thread,
                                xnflags_t clrmask,
                                xnflags_t setmask);

void xnpod_suspend_thread(xnthread_t *thread,
                          xnflags_t mask,
                          xnticks_t timeout,
                          xntmode_t timeout_mode,
                          struct xnsynch *wchan);

void xnpod_resume_thread(xnthread_t *thread,
                         xnflags_t mask);

int xnpod_unblock_thread(xnthread_t *thread);

int xnpod_set_thread_schedparam(struct xnthread *thread,
                                struct xnsched_class *sched_class,
                                const union xnsched_policy_param *sched_param);

int xnpod_migrate_thread(int cpu);

void xnpod_dispatch_signals(void);

static inline void xnpod_schedule(void)
{
        struct xnsched *sched;
        /*
         * NOTE: Since __xnpod_schedule() won't run if an escalation
         * to primary domain is needed, we won't use critical
         * scheduler information before we actually run in primary
         * mode; therefore we can first test the scheduler status then
         * escalate.  Running in the primary domain means that no
         * Linux-triggered CPU migration may occur from that point
         * either. Finally, since migration is always a self-directed
         * operation for Xenomai threads, we can safely read the
         * scheduler state bits without holding the nklock.
         *
         * Said differently, if we race here because of a CPU
         * migration, it must have been Linux-triggered because we run
         * in secondary mode; in which case we will escalate to the
         * primary domain, then unwind the current call frame without
         * running the rescheduling procedure in
         * __xnpod_schedule(). Therefore, the scheduler pointer will
         * be either valid, or unused.
         */
        sched = xnpod_current_sched();
        /*
         * No immediate rescheduling is possible if an ISR or callout
         * context is active, or if we are caught in the middle of an
         * unlocked context switch.
         */
#if XENO_DEBUG(NUCLEUS)
        if (testbits(sched->status | sched->lflags, XNKCOUT|XNINIRQ|XNSWLOCK))
                return;
#else /* !XENO_DEBUG(NUCLEUS) */
        if (testbits(sched->status | sched->lflags,
                     XNKCOUT|XNINIRQ|XNSWLOCK|XNRESCHED) != XNRESCHED)
                return;
#endif /* !XENO_DEBUG(NUCLEUS) */

        __xnpod_schedule(sched);
}
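
xnpod_schedule() only kicks the rescheduling procedure; the state change that makes it necessary is applied first, usually under nklock. A typical wakeup path looks like the sketch below (illustrative; it assumes the spl_t type and the xnlock_get_irqsave()/xnlock_put_irqrestore() helpers that accompany the nklock declaration above):

static void my_wakeup(xnthread_t *thread)
{
        spl_t s;

        xnlock_get_irqsave(&nklock, s);
        /* Clear the suspensive condition first... */
        xnpod_resume_thread(thread, XNSUSP);
        /* ...then let the scheduler pick the next thread to run. */
        xnpod_schedule();
        xnlock_put_irqrestore(&nklock, s);
}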

void xnpod_lock_sched(void);

void xnpod_unlock_sched(void);

void xnpod_fire_callouts(xnqueue_t *hookq,
                         xnthread_t *thread);

static inline void xnpod_run_hooks(struct xnqueue *q,
                                   struct xnthread *thread, const char *type)
{
        if (!emptyq_p(q)) {
                trace_mark(xn_nucleus, thread_callout,
                           "thread %p thread_name %s hook %s",
                           thread, xnthread_name(thread), type);
                xnpod_fire_callouts(q, thread);
        }
}

int xnpod_set_thread_periodic(xnthread_t *thread,
                              xnticks_t idate,
                              xnticks_t period);

int xnpod_wait_thread_period(unsigned long *overruns_r);
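
Together, xnpod_set_thread_periodic() and xnpod_wait_thread_period() implement the usual periodic-task loop. A condensed sketch (the period value is arbitrary and the -ETIMEDOUT convention for overruns follows the nucleus documentation):

static void my_periodic_body(void *cookie)
{
        unsigned long overruns;
        int err;

        /* XN_INFINITE as the initial date: no initial delay, the first
           period starts from the current date. Period in time-base ticks. */
        err = xnpod_set_thread_periodic(xnpod_current_thread(),
                                        XN_INFINITE, 100000);
        if (err)
                return;

        for (;;) {
                err = xnpod_wait_thread_period(&overruns);
                if (err == -ETIMEDOUT)
                        ;       /* 'overruns' periods were missed, catch up */
                else if (err)
                        break;  /* e.g. unblocked or made aperiodic */
                /* one cycle of periodic work goes here */
        }
}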

int xnpod_set_thread_tslice(struct xnthread *thread,
                            xnticks_t quantum);

static inline xntime_t xnpod_get_cpu_time(void)
{
        return xnarch_get_cpu_time();
}

int xnpod_add_hook(int type, void (*routine) (xnthread_t *));

int xnpod_remove_hook(int type, void (*routine) (xnthread_t *));
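
xnpod_add_hook() and xnpod_remove_hook() attach nucleus-wide callouts to thread events. A short registration sketch, assuming the XNHOOK_THREAD_DELETE hook type constant defined elsewhere in the nucleus:

static void my_delete_hook(xnthread_t *thread)
{
        /* Runs for every deleted thread: release per-thread
           resources owned by the skin here. */
}

static int my_install_hooks(void)
{
        return xnpod_add_hook(XNHOOK_THREAD_DELETE, &my_delete_hook);
}

static void my_uninstall_hooks(void)
{
        xnpod_remove_hook(XNHOOK_THREAD_DELETE, &my_delete_hook);
}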

static inline void xnpod_yield(void)
{
        xnpod_resume_thread(xnpod_current_thread(), 0);
        xnpod_schedule();
}

static inline void xnpod_delay(xnticks_t timeout)
{
        xnpod_suspend_thread(xnpod_current_thread(), XNDELAY, timeout, XN_RELATIVE, NULL);
}

static inline void xnpod_suspend_self(void)
{
        xnpod_suspend_thread(xnpod_current_thread(), XNSUSP, XN_INFINITE, XN_RELATIVE, NULL);
}

static inline void xnpod_delete_self(void)
{
        xnpod_delete_thread(xnpod_current_thread());
}

#ifdef __cplusplus
}
#endif

#endif /* !_XENO_NUCLEUS_POD_H */
