#ifndef _RTDM_DRIVER_H
#define _RTDM_DRIVER_H

#ifndef __KERNEL__
#error This header is for kernel space usage only. \
       You are likely looking for rtdm/rtdm.h...
#endif

#include <asm/atomic.h>
#include <linux/list.h>

#include <nucleus/xenomai.h>
#include <nucleus/heap.h>
#include <nucleus/pod.h>
#include <nucleus/synch.h>
#include <nucleus/select.h>
#include <nucleus/vfile.h>
#include <rtdm/rtdm.h>

#include <nucleus/assert.h>

#ifdef CONFIG_PCI
#include <asm-generic/xenomai/pci_ids.h>
#endif

#ifndef CONFIG_XENO_OPT_DEBUG_RTDM
#define CONFIG_XENO_OPT_DEBUG_RTDM 0
#endif

struct rtdm_dev_context;
typedef struct xnselector rtdm_selector_t;
enum rtdm_selecttype;
/* Device flags (struct rtdm_device, device_flags) */
#define RTDM_EXCLUSIVE			0x0001
#define RTDM_NAMED_DEVICE		0x0010
#define RTDM_PROTOCOL_DEVICE		0x0020
#define RTDM_DEVICE_TYPE_MASK		0x00F0

/* Context flags (bit numbers in struct rtdm_dev_context, context_flags) */
#define RTDM_CREATED_IN_NRT		0
#define RTDM_CLOSING			1
#define RTDM_USER_CONTEXT_FLAG		8

/* Version of struct rtdm_device */
#define RTDM_DEVICE_STRUCT_VER		5

/* Version of struct rtdm_dev_context */
#define RTDM_CONTEXT_STRUCT_VER		3

#define RTDM_SECURE_DEVICE		0x80000000

/* Pack driver version numbers into a single value, and extract them again */
#define RTDM_DRIVER_VER(major, minor, patch) \
	(((major & 0xFF) << 16) | ((minor & 0xFF) << 8) | (patch & 0xFF))

#define RTDM_DRIVER_MAJOR_VER(ver)	(((ver) >> 16) & 0xFF)
#define RTDM_DRIVER_MINOR_VER(ver)	(((ver) >> 8) & 0xFF)
#define RTDM_DRIVER_PATCH_VER(ver)	((ver) & 0xFF)
enum rtdm_selecttype {
	/* Select input data availability for reading */
	RTDM_SELECTTYPE_READ = XNSELECT_READ,

	/* Select output buffer availability for writing */
	RTDM_SELECTTYPE_WRITE = XNSELECT_WRITE,

	/* Select exceptional condition */
	RTDM_SELECTTYPE_EXCEPT = XNSELECT_EXCEPT
};
/* Named device open handler */
typedef int (*rtdm_open_handler_t)(struct rtdm_dev_context *context,
				   rtdm_user_info_t *user_info, int oflag);

/* Socket creation handler for protocol devices */
typedef int (*rtdm_socket_handler_t)(struct rtdm_dev_context *context,
				     rtdm_user_info_t *user_info, int protocol);

/* Close handler */
typedef int (*rtdm_close_handler_t)(struct rtdm_dev_context *context,
				    rtdm_user_info_t *user_info);

/* IOCTL handler */
typedef int (*rtdm_ioctl_handler_t)(struct rtdm_dev_context *context,
				    rtdm_user_info_t *user_info,
				    unsigned int request, void __user *arg);

/* Select binding handler */
typedef int (*rtdm_select_bind_handler_t)(struct rtdm_dev_context *context,
					  rtdm_selector_t *selector,
					  enum rtdm_selecttype type,
					  unsigned fd_index);

/* Read handler */
typedef ssize_t (*rtdm_read_handler_t)(struct rtdm_dev_context *context,
				       rtdm_user_info_t *user_info,
				       void *buf, size_t nbyte);

/* Write handler */
typedef ssize_t (*rtdm_write_handler_t)(struct rtdm_dev_context *context,
					rtdm_user_info_t *user_info,
					const void *buf, size_t nbyte);

/* Receive message handler */
typedef ssize_t (*rtdm_recvmsg_handler_t)(struct rtdm_dev_context *context,
					  rtdm_user_info_t *user_info,
					  struct msghdr *msg, int flags);

/* Transmit message handler */
typedef ssize_t (*rtdm_sendmsg_handler_t)(struct rtdm_dev_context *context,
					  rtdm_user_info_t *user_info,
					  const struct msghdr *msg, int flags);

/* Handler prototype used by rtdm_exec_in_rt() */
typedef int (*rtdm_rt_handler_t)(struct rtdm_dev_context *context,
				 rtdm_user_info_t *user_info, void *arg);
struct rtdm_operations {
	/* Close handlers (real-time and non-real-time context) */
	rtdm_close_handler_t close_rt;
	rtdm_close_handler_t close_nrt;

	/* IOCTL handlers */
	rtdm_ioctl_handler_t ioctl_rt;
	rtdm_ioctl_handler_t ioctl_nrt;

	/* Select binding handler */
	rtdm_select_bind_handler_t select_bind;

	/* Read handlers (named devices only) */
	rtdm_read_handler_t read_rt;
	rtdm_read_handler_t read_nrt;

	/* Write handlers (named devices only) */
	rtdm_write_handler_t write_rt;
	rtdm_write_handler_t write_nrt;

	/* Receive message handlers (protocol devices only) */
	rtdm_recvmsg_handler_t recvmsg_rt;
	rtdm_recvmsg_handler_t recvmsg_nrt;

	/* Transmit message handlers (protocol devices only) */
	rtdm_sendmsg_handler_t sendmsg_rt;
	rtdm_sendmsg_handler_t sendmsg_nrt;
};

struct rtdm_devctx_reserved {
	void *owner;
	struct list_head cleanup;
};

struct rtdm_dev_context {
	/* Context flags, see RTDM_CREATED_IN_NRT and friends */
	unsigned long context_flags;

	/* Associated file descriptor */
	int fd;

	/* Lock counter of the context, held while the structure is in use */
	atomic_t close_lock_count;

	/* Set of operation handlers */
	struct rtdm_operations *ops;

	/* Reference to the owning device */
	struct rtdm_device *device;

	/* Data stored by RTDM inside a device context (internal use only) */
	struct rtdm_devctx_reserved reserved;

	/* Begin of the driver-defined context data structure */
	char dev_private[0];
};

/* Returns a pointer to the driver-defined part of a device context */
static inline void *
rtdm_context_to_private(struct rtdm_dev_context *context)
{
	return (void *)context->dev_private;
}

/* Returns the device context owning a given driver-defined area */
static inline struct rtdm_dev_context *
rtdm_private_to_context(void *dev_private)
{
	return container_of(dev_private, struct rtdm_dev_context, dev_private);
}

struct rtdm_dev_reserved {
	struct list_head entry;
	atomic_t refcount;
	struct rtdm_dev_context *exclusive_context;
};

struct rtdm_device {
	/* Revision number of this structure, see RTDM_DEVICE_STRUCT_VER */
	int struct_version;

	/* Device flags, see RTDM_EXCLUSIVE and friends */
	int device_flags;
	/* Size of the driver-defined appendix to struct rtdm_dev_context */
	size_t context_size;

	/* Named device identifier (named devices only) */
	char device_name[RTDM_MAX_DEVNAME_LEN + 1];

	/* Protocol device identifiers (protocol devices only) */
	int protocol_family;
	int socket_type;

	/* Open handlers (named devices only) */
	rtdm_open_handler_t open_rt;
	rtdm_open_handler_t open_nrt;

	/* Socket creation handlers (protocol devices only) */
	rtdm_socket_handler_t socket_rt;
	rtdm_socket_handler_t socket_nrt;

	/* Default operation handlers */
	struct rtdm_operations ops;

	/* Device class and sub-class IDs */
	int device_class;
	int device_sub_class;
	/* Supported device profile version */
	int profile_version;
	/* Informational driver name */
	const char *driver_name;
	/* Driver version, see RTDM_DRIVER_VER() */
	int driver_version;
	/* Informational names of the peripheral and of the driver provider */
	const char *peripheral_name;
	const char *provider_name;

	/* Name of the device's /proc entry */
	const char *proc_name;
#ifdef CONFIG_XENO_OPT_VFILE
	/* vfile data, set up on registration (internal use only) */
	struct xnvfile_directory vfroot;
	struct xnvfile_regular info_vfile;
#endif

	/* Driver-definable device ID and device data */
	int device_id;
	void *device_data;

	/* Data stored by RTDM inside a registered device (internal use only) */
	struct rtdm_dev_reserved reserved;
};

int rtdm_dev_register(struct rtdm_device *device);
int rtdm_dev_unregister(struct rtdm_device *device, unsigned int poll_delay);
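/*
 * Usage sketch (not part of the original header): registering a minimal
 * named device. The mydrv_* handlers and struct mydrv_context are
 * hypothetical; the struct fields, flags and calls are taken from the API
 * declared above, RTDM_CLASS_EXPERIMENTAL comes from rtdm/rtdm.h.
 *
 *	static struct rtdm_device mydrv_device = {
 *		.struct_version  = RTDM_DEVICE_STRUCT_VER,
 *		.device_flags    = RTDM_NAMED_DEVICE | RTDM_EXCLUSIVE,
 *		.context_size    = sizeof(struct mydrv_context),
 *		.device_name     = "mydrv0",
 *		.open_nrt        = mydrv_open,
 *		.ops = {
 *			.close_nrt = mydrv_close,
 *			.ioctl_rt  = mydrv_ioctl,
 *			.ioctl_nrt = mydrv_ioctl,
 *			.read_rt   = mydrv_read,
 *			.write_rt  = mydrv_write,
 *		},
 *		.device_class    = RTDM_CLASS_EXPERIMENTAL,
 *		.driver_name     = "mydrv",
 *		.driver_version  = RTDM_DRIVER_VER(0, 1, 0),
 *		.peripheral_name = "Example peripheral",
 *		.provider_name   = "Example provider",
 *		.proc_name       = mydrv_device.device_name,
 *	};
 *
 *	static int __init mydrv_init(void)
 *	{
 *		return rtdm_dev_register(&mydrv_device);
 *	}
 *
 *	static void __exit mydrv_exit(void)
 *	{
 *		rtdm_dev_unregister(&mydrv_device, 1000);
 *	}
 */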

/* Inter-driver API: these map onto the user-visible rt_dev_*() calls */
#define rtdm_open		rt_dev_open
#define rtdm_socket		rt_dev_socket
#define rtdm_close		rt_dev_close
#define rtdm_ioctl		rt_dev_ioctl
#define rtdm_read		rt_dev_read
#define rtdm_write		rt_dev_write
#define rtdm_recvmsg		rt_dev_recvmsg
#define rtdm_recv		rt_dev_recv
#define rtdm_recvfrom		rt_dev_recvfrom
#define rtdm_sendmsg		rt_dev_sendmsg
#define rtdm_send		rt_dev_send
#define rtdm_sendto		rt_dev_sendto
#define rtdm_bind		rt_dev_bind
#define rtdm_listen		rt_dev_listen
#define rtdm_accept		rt_dev_accept
#define rtdm_getsockopt		rt_dev_getsockopt
#define rtdm_setsockopt		rt_dev_setsockopt
#define rtdm_getsockname	rt_dev_getsockname
#define rtdm_getpeername	rt_dev_getpeername
#define rtdm_shutdown		rt_dev_shutdown

struct rtdm_dev_context *rtdm_context_get(int fd);
#ifndef DOXYGEN_CPP

#define CONTEXT_IS_LOCKED(context) \
	(atomic_read(&(context)->close_lock_count) > 1 || \
	 (test_bit(RTDM_CLOSING, &(context)->context_flags) && \
	  atomic_read(&(context)->close_lock_count) > 0))

static inline void rtdm_context_lock(struct rtdm_dev_context *context)
{
	XENO_ASSERT(RTDM, CONTEXT_IS_LOCKED(context), /* nop */);
	atomic_inc(&context->close_lock_count);
}

extern int rtdm_apc;

static inline void rtdm_context_unlock(struct rtdm_dev_context *context)
{
	XENO_ASSERT(RTDM, CONTEXT_IS_LOCKED(context), /* nop */);
	smp_mb__before_atomic_dec();
	if (unlikely(atomic_dec_and_test(&context->close_lock_count)))
		rthal_apc_schedule(rtdm_apc);
}

static inline void rtdm_context_put(struct rtdm_dev_context *context)
{
	rtdm_context_unlock(context);
}

/* Clock services */
struct xntbase;
extern struct xntbase *rtdm_tbase;

static inline nanosecs_abs_t rtdm_clock_read(void)
{
	return xntbase_ticks2ns(rtdm_tbase, xntbase_get_time(rtdm_tbase));
}

static inline nanosecs_abs_t rtdm_clock_read_monotonic(void)
{
	return xntbase_ticks2ns(rtdm_tbase, xntbase_get_jiffies(rtdm_tbase));
}
#endif /* !DOXYGEN_CPP */

int rtdm_select_bind(int fd, rtdm_selector_t *selector,
		     enum rtdm_selecttype type, unsigned fd_index);

#ifdef DOXYGEN_CPP
#define RTDM_EXECUTE_ATOMICALLY(code_block)	\
{						\
	<ENTER_ATOMIC_SECTION>			\
	code_block;				\
	<LEAVE_ATOMIC_SECTION>			\
}
#else /* !DOXYGEN_CPP */
#define RTDM_EXECUTE_ATOMICALLY(code_block)		\
{							\
	spl_t __rtdm_s;					\
							\
	xnlock_get_irqsave(&nklock, __rtdm_s);		\
	code_block;					\
	xnlock_put_irqrestore(&nklock, __rtdm_s);	\
}
#endif /* !DOXYGEN_CPP */
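/*
 * Usage sketch (illustration only): RTDM_EXECUTE_ATOMICALLY runs the given
 * code block with interrupts disabled and the nucleus lock held, so keep the
 * block short. "my_ctx" and its fields are hypothetical.
 *
 *	RTDM_EXECUTE_ATOMICALLY(
 *		if (!my_ctx->shutdown)
 *			my_ctx->pending++;
 *	);
 */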

/* Spinlock services with preemption deactivation */

/* Static lock initialisation value */
#define RTDM_LOCK_UNLOCKED	RTHAL_SPIN_LOCK_UNLOCKED

/* Lock variable */
typedef rthal_spinlock_t rtdm_lock_t;

/* Variable to save the context while holding a lock */
typedef unsigned long rtdm_lockctx_t;

/* Dynamic lock initialisation */
#define rtdm_lock_init(lock)	rthal_spin_lock_init(lock)

/* Acquire lock from non-preemptible contexts */
#ifdef DOXYGEN_CPP
#define rtdm_lock_get(lock)	rthal_spin_lock(lock)
#else /* !DOXYGEN_CPP */
#define rtdm_lock_get(lock)					\
	do {							\
		XENO_BUGON(RTDM, !rthal_local_irq_disabled());	\
		rthal_spin_lock(lock);				\
	} while (0)
#endif /* !DOXYGEN_CPP */

/* Release lock without preemption restoration */
#define rtdm_lock_put(lock)	rthal_spin_unlock(lock)

/* Acquire lock and disable preemption */
#define rtdm_lock_get_irqsave(lock, context)	\
	rthal_spin_lock_irqsave(lock, context)

/* Release lock and restore preemption state */
#define rtdm_lock_put_irqrestore(lock, context)	\
	rthal_spin_unlock_irqrestore(lock, context)

/* Disable preemption locally */
#define rtdm_lock_irqsave(context)	\
	rthal_local_irq_save(context)

/* Restore preemption state */
#define rtdm_lock_irqrestore(context)	\
	rthal_local_irq_restore(context)
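/*
 * Usage sketch (illustration only): protecting driver data shared between
 * interrupt and task context. "mydrv_lock" and "mydrv_count" are
 * hypothetical.
 *
 *	static rtdm_lock_t mydrv_lock = RTDM_LOCK_UNLOCKED;
 *	static unsigned long mydrv_count;
 *
 *	static void mydrv_update(void)
 *	{
 *		rtdm_lockctx_t lock_ctx;
 *
 *		rtdm_lock_get_irqsave(&mydrv_lock, lock_ctx);
 *		mydrv_count++;
 *		rtdm_lock_put_irqrestore(&mydrv_lock, lock_ctx);
 *	}
 */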

/* Interrupt management services */

typedef xnintr_t rtdm_irq_t;

/* Enable IRQ-line sharing among drivers */
#define RTDM_IRQTYPE_SHARED	XN_ISR_SHARED
/* Mark IRQ as edge-triggered */
#define RTDM_IRQTYPE_EDGE	XN_ISR_EDGE

/* Interrupt handler, returns RTDM_IRQ_NONE or RTDM_IRQ_HANDLED */
typedef int (*rtdm_irq_handler_t)(rtdm_irq_t *irq_handle);

/* Unhandled interrupt */
#define RTDM_IRQ_NONE		XN_ISR_NONE
/* Denote handled interrupt */
#define RTDM_IRQ_HANDLED	XN_ISR_HANDLED

/* Retrieve the IRQ handler argument passed to rtdm_irq_request() */
#define rtdm_irq_get_arg(irq_handle, type)	((type *)irq_handle->cookie)

int rtdm_irq_request(rtdm_irq_t *irq_handle, unsigned int irq_no,
		     rtdm_irq_handler_t handler, unsigned long flags,
		     const char *device_name, void *arg);

#ifndef DOXYGEN_CPP
static inline int rtdm_irq_free(rtdm_irq_t *irq_handle)
{
	return xnintr_detach(irq_handle);
}

static inline int rtdm_irq_enable(rtdm_irq_t *irq_handle)
{
	return xnintr_enable(irq_handle);
}

static inline int rtdm_irq_disable(rtdm_irq_t *irq_handle)
{
	return xnintr_disable(irq_handle);
}
#endif /* !DOXYGEN_CPP */
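/*
 * Usage sketch (illustration only): requesting an interrupt line and
 * retrieving the per-device argument inside the handler. "struct mydrv_dev",
 * "MYDRV_IRQ" and the mydrv_* register helpers are hypothetical.
 *
 *	static int mydrv_interrupt(rtdm_irq_t *irq_handle)
 *	{
 *		struct mydrv_dev *dev =
 *			rtdm_irq_get_arg(irq_handle, struct mydrv_dev);
 *
 *		if (!mydrv_irq_pending(dev))
 *			return RTDM_IRQ_NONE;
 *
 *		mydrv_ack_irq(dev);
 *		return RTDM_IRQ_HANDLED;
 *	}
 *
 *	During device setup:
 *
 *	ret = rtdm_irq_request(&dev->irq_handle, MYDRV_IRQ, mydrv_interrupt,
 *			       RTDM_IRQTYPE_SHARED, "mydrv", dev);
 */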

/* Non-real-time signalling services */

typedef unsigned rtdm_nrtsig_t;

/* Non-real-time signal handler, executed in non-real-time (Linux) context */
typedef void (*rtdm_nrtsig_handler_t)(rtdm_nrtsig_t nrt_sig, void *arg);

#ifndef DOXYGEN_CPP
static inline int rtdm_nrtsig_init(rtdm_nrtsig_t *nrt_sig,
				   rtdm_nrtsig_handler_t handler, void *arg)
{
	*nrt_sig = rthal_alloc_virq();

	if (*nrt_sig == 0)
		return -EAGAIN;

	rthal_virtualize_irq(rthal_root_domain, *nrt_sig, handler, arg, NULL,
			     IPIPE_HANDLE_MASK);
	return 0;
}

static inline void rtdm_nrtsig_destroy(rtdm_nrtsig_t *nrt_sig)
{
	rthal_free_virq(*nrt_sig);
}

static inline void rtdm_nrtsig_pend(rtdm_nrtsig_t *nrt_sig)
{
	rthal_trigger_irq(*nrt_sig);
}
#endif /* !DOXYGEN_CPP */
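/*
 * Usage sketch (illustration only): deferring Linux-domain work from
 * real-time context via a non-real-time signal. "mydrv_nrt_work" and "dev"
 * are hypothetical.
 *
 *	static rtdm_nrtsig_t mydrv_nrt_sig;
 *
 *	static void mydrv_nrt_handler(rtdm_nrtsig_t nrt_sig, void *arg)
 *	{
 *		mydrv_nrt_work(arg);	// runs in Linux context
 *	}
 *
 *	Setup (non-RT context):
 *
 *	ret = rtdm_nrtsig_init(&mydrv_nrt_sig, mydrv_nrt_handler, dev);
 *
 *	From RT context, e.g. an interrupt handler:
 *
 *	rtdm_nrtsig_pend(&mydrv_nrt_sig);
 */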

/* Timer services */

typedef xntimer_t rtdm_timer_t;

/* Timer handler, executed in real-time interrupt context */
typedef void (*rtdm_timer_handler_t)(rtdm_timer_t *timer);

enum rtdm_timer_mode {
	/* Monotonic timer with relative timeout */
	RTDM_TIMERMODE_RELATIVE = XN_RELATIVE,

	/* Monotonic timer with absolute timeout */
	RTDM_TIMERMODE_ABSOLUTE = XN_ABSOLUTE,

	/* Adjustable (real-time clock based) timer with absolute timeout */
	RTDM_TIMERMODE_REALTIME = XN_REALTIME
};

#ifndef DOXYGEN_CPP
#define rtdm_timer_init(timer, handler, name)		\
({							\
	xntimer_init((timer), rtdm_tbase, handler);	\
	xntimer_set_name((timer), (name));		\
	0;						\
})
#endif /* !DOXYGEN_CPP */

void rtdm_timer_destroy(rtdm_timer_t *timer);

int rtdm_timer_start(rtdm_timer_t *timer, nanosecs_abs_t expiry,
		     nanosecs_rel_t interval, enum rtdm_timer_mode mode);

void rtdm_timer_stop(rtdm_timer_t *timer);

#ifndef DOXYGEN_CPP
static inline int rtdm_timer_start_in_handler(rtdm_timer_t *timer,
					      nanosecs_abs_t expiry,
					      nanosecs_rel_t interval,
					      enum rtdm_timer_mode mode)
{
	return xntimer_start(timer, xntbase_ns2ticks_ceil(rtdm_tbase, expiry),
			     xntbase_ns2ticks_ceil(rtdm_tbase, interval),
			     (xntmode_t)mode);
}

static inline void rtdm_timer_stop_in_handler(rtdm_timer_t *timer)
{
	xntimer_stop(timer);
}
#endif /* !DOXYGEN_CPP */
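/*
 * Usage sketch (illustration only): a periodic timer firing every
 * millisecond. "mydrv_timer_cb" contents are hypothetical.
 *
 *	static rtdm_timer_t mydrv_timer;
 *
 *	static void mydrv_timer_cb(rtdm_timer_t *timer)
 *	{
 *		// runs in real-time timer context
 *	}
 *
 *	ret = rtdm_timer_init(&mydrv_timer, mydrv_timer_cb, "mydrv timer");
 *	if (!ret)
 *		ret = rtdm_timer_start(&mydrv_timer, 1000000, 1000000,
 *				       RTDM_TIMERMODE_RELATIVE);
 *
 *	On cleanup:
 *
 *	rtdm_timer_stop(&mydrv_timer);
 *	rtdm_timer_destroy(&mydrv_timer);
 */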

/* Task services */

typedef xnthread_t rtdm_task_t;

/* Task procedure */
typedef void (*rtdm_task_proc_t)(void *arg);

/* Task priority range */
#define RTDM_TASK_LOWEST_PRIORITY	XNSCHED_LOW_PRIO
#define RTDM_TASK_HIGHEST_PRIORITY	XNSCHED_HIGH_PRIO

/* Relative priority adjustments */
#define RTDM_TASK_RAISE_PRIORITY	(+1)
#define RTDM_TASK_LOWER_PRIORITY	(-1)

int rtdm_task_init(rtdm_task_t *task, const char *name,
		   rtdm_task_proc_t task_proc, void *arg,
		   int priority, nanosecs_rel_t period);
int __rtdm_task_sleep(xnticks_t timeout, xntmode_t mode);
void rtdm_task_busy_sleep(nanosecs_rel_t delay);
#ifndef DOXYGEN_CPP
static inline void rtdm_task_destroy(rtdm_task_t *task)
{
	xnpod_delete_thread(task);
}

void rtdm_task_join_nrt(rtdm_task_t *task, unsigned int poll_delay);

static inline void rtdm_task_set_priority(rtdm_task_t *task, int priority)
{
	union xnsched_policy_param param = { .rt = { .prio = priority } };
	xnpod_set_thread_schedparam(task, &xnsched_class_rt, &param);
	xnpod_schedule();
}

static inline int rtdm_task_set_period(rtdm_task_t *task,
				       nanosecs_rel_t period)
{
	if (period < 0)
		period = 0;
	return xnpod_set_thread_periodic(task, XN_INFINITE,
					 xntbase_ns2ticks_ceil
					 (xnthread_time_base(task), period));
}

static inline int rtdm_task_unblock(rtdm_task_t *task)
{
	int res = xnpod_unblock_thread(task);

	xnpod_schedule();
	return res;
}

static inline rtdm_task_t *rtdm_task_current(void)
{
	return xnpod_current_thread();
}

static inline int rtdm_task_wait_period(void)
{
	XENO_ASSERT(RTDM, !xnpod_unblockable_p(), return -EPERM;);
	return xnpod_wait_thread_period(NULL);
}

static inline int rtdm_task_sleep(nanosecs_rel_t delay)
{
	return __rtdm_task_sleep(delay, XN_RELATIVE);
}

static inline int
rtdm_task_sleep_abs(nanosecs_abs_t wakeup_date, enum rtdm_timer_mode mode)
{
	/* only absolute timing modes are valid here */
	if (mode != RTDM_TIMERMODE_ABSOLUTE && mode != RTDM_TIMERMODE_REALTIME)
		return -EINVAL;
	return __rtdm_task_sleep(wakeup_date, (xntmode_t)mode);
}

/* rtdm_task_sleep_until() is deprecated, use rtdm_task_sleep_abs() instead */
static inline int __deprecated rtdm_task_sleep_until(nanosecs_abs_t wakeup_time)
{
	return __rtdm_task_sleep(wakeup_time, XN_REALTIME);
}
#endif /* !DOXYGEN_CPP */
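/*
 * Usage sketch (illustration only): a periodic kernel task with a 1 ms
 * period. "struct mydrv_dev", its stop flag and "mydrv_do_work" are
 * hypothetical.
 *
 *	static rtdm_task_t mydrv_task;
 *
 *	static void mydrv_task_proc(void *arg)
 *	{
 *		struct mydrv_dev *dev = arg;
 *
 *		while (!dev->stop) {
 *			mydrv_do_work(dev);
 *			if (rtdm_task_wait_period())
 *				break;
 *		}
 *	}
 *
 *	Creation (non-RT context), priority and period are just examples:
 *
 *	ret = rtdm_task_init(&mydrv_task, "mydrv", mydrv_task_proc, dev,
 *			     RTDM_TASK_HIGHEST_PRIORITY - 10, 1000000);
 */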

/* Timeout sequences */

typedef nanosecs_abs_t rtdm_toseq_t;

void rtdm_toseq_init(rtdm_toseq_t *timeout_seq, nanosecs_rel_t timeout);

/* Event services */

typedef struct {
	xnsynch_t synch_base;
	DECLARE_XNSELECT(select_block);
} rtdm_event_t;

#define RTDM_EVENT_PENDING	XNSYNCH_SPARE1

void rtdm_event_init(rtdm_event_t *event, unsigned long pending);
#ifdef CONFIG_XENO_OPT_RTDM_SELECT
int rtdm_event_select_bind(rtdm_event_t *event, rtdm_selector_t *selector,
			   enum rtdm_selecttype type, unsigned fd_index);
#else /* !CONFIG_XENO_OPT_RTDM_SELECT */
#define rtdm_event_select_bind(e, s, t, i) ({ (void)(e); -EBADF; })
#endif /* !CONFIG_XENO_OPT_RTDM_SELECT */
int rtdm_event_wait(rtdm_event_t *event);
int rtdm_event_timedwait(rtdm_event_t *event, nanosecs_rel_t timeout,
			 rtdm_toseq_t *timeout_seq);
void rtdm_event_signal(rtdm_event_t *event);

void rtdm_event_clear(rtdm_event_t *event);

#ifndef DOXYGEN_CPP
void __rtdm_synch_flush(xnsynch_t *synch, unsigned long reason);

static inline void rtdm_event_pulse(rtdm_event_t *event)
{
	trace_mark(xn_rtdm, event_pulse, "event %p", event);
	__rtdm_synch_flush(&event->synch_base, 0);
}

static inline void rtdm_event_destroy(rtdm_event_t *event)
{
	trace_mark(xn_rtdm, event_destroy, "event %p", event);
	__rtdm_synch_flush(&event->synch_base, XNRMID);
	xnselect_destroy(&event->select_block);
}
#endif /* !DOXYGEN_CPP */
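/*
 * Usage sketch (illustration only): waiting for an event with a timeout
 * sequence, so the overall timeout covers all retries of the loop.
 * "dev->rx_event", "timeout" and "mydrv_data_ready" are hypothetical.
 *
 *	rtdm_toseq_t timeout_seq;
 *	int ret;
 *
 *	rtdm_toseq_init(&timeout_seq, timeout);	// timeout in nanoseconds
 *
 *	for (;;) {
 *		ret = rtdm_event_timedwait(&dev->rx_event, timeout,
 *					   &timeout_seq);
 *		if (ret)
 *			break;		// -ETIMEDOUT, -EINTR, ...
 *		if (mydrv_data_ready(dev))
 *			break;
 *	}
 *
 * The interrupt handler would wake the waiter with
 * rtdm_event_signal(&dev->rx_event).
 */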

/* Semaphore services */

typedef struct {
	unsigned long value;
	xnsynch_t synch_base;
	DECLARE_XNSELECT(select_block);
} rtdm_sem_t;

void rtdm_sem_init(rtdm_sem_t *sem, unsigned long value);
#ifdef CONFIG_XENO_OPT_RTDM_SELECT
int rtdm_sem_select_bind(rtdm_sem_t *sem, rtdm_selector_t *selector,
			 enum rtdm_selecttype type, unsigned fd_index);
#else /* !CONFIG_XENO_OPT_RTDM_SELECT */
#define rtdm_sem_select_bind(s, se, t, i) ({ (void)(s); -EBADF; })
#endif /* !CONFIG_XENO_OPT_RTDM_SELECT */
int rtdm_sem_down(rtdm_sem_t *sem);
int rtdm_sem_timeddown(rtdm_sem_t *sem, nanosecs_rel_t timeout,
		       rtdm_toseq_t *timeout_seq);
void rtdm_sem_up(rtdm_sem_t *sem);

#ifndef DOXYGEN_CPP
static inline void rtdm_sem_destroy(rtdm_sem_t *sem)
{
	trace_mark(xn_rtdm, sem_destroy, "sem %p", sem);
	__rtdm_synch_flush(&sem->synch_base, XNRMID);
	xnselect_destroy(&sem->select_block);
}
#endif /* !DOXYGEN_CPP */
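/*
 * Usage sketch (illustration only): a counting semaphore tracking items in a
 * receive ring. "dev->rx_sem", "mydrv_pop_item" and "mydrv_push_item" are
 * hypothetical.
 *
 *	rtdm_sem_init(&dev->rx_sem, 0);
 *
 *	Consumer (e.g. a read handler):
 *
 *	ret = rtdm_sem_timeddown(&dev->rx_sem, timeout, NULL);
 *	if (!ret)
 *		mydrv_pop_item(dev);
 *
 *	Producer (e.g. an interrupt handler):
 *
 *	mydrv_push_item(dev);
 *	rtdm_sem_up(&dev->rx_sem);
 */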

/* Mutex services */

typedef struct {
	xnsynch_t synch_base;
} rtdm_mutex_t;

void rtdm_mutex_init(rtdm_mutex_t *mutex);
int rtdm_mutex_lock(rtdm_mutex_t *mutex);
int rtdm_mutex_timedlock(rtdm_mutex_t *mutex, nanosecs_rel_t timeout,
			 rtdm_toseq_t *timeout_seq);

#ifndef DOXYGEN_CPP
static inline void rtdm_mutex_unlock(rtdm_mutex_t *mutex)
{
	XENO_ASSERT(RTDM, !xnpod_asynch_p(), return;);

	trace_mark(xn_rtdm, mutex_unlock, "mutex %p", mutex);

	if (unlikely(xnsynch_release(&mutex->synch_base) != NULL))
		xnpod_schedule();
}

static inline void rtdm_mutex_destroy(rtdm_mutex_t *mutex)
{
	trace_mark(xn_rtdm, mutex_destroy, "mutex %p", mutex);

	__rtdm_synch_flush(&mutex->synch_base, XNRMID);
}
#endif /* !DOXYGEN_CPP */
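/*
 * Usage sketch (illustration only): serializing register access between
 * real-time tasks. "dev->io_mutex" and "mydrv_write_regs" are hypothetical.
 *
 *	rtdm_mutex_init(&dev->io_mutex);
 *
 *	ret = rtdm_mutex_lock(&dev->io_mutex);
 *	if (!ret) {
 *		mydrv_write_regs(dev);
 *		rtdm_mutex_unlock(&dev->io_mutex);
 *	}
 *
 *	On cleanup:
 *
 *	rtdm_mutex_destroy(&dev->io_mutex);
 */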

/* Utility services */

#define rtdm_printk(format, ...)	printk(format, ##__VA_ARGS__)

#ifndef DOXYGEN_CPP
static inline void *rtdm_malloc(size_t size)
{
	return xnmalloc(size);
}

static inline void rtdm_free(void *ptr)
{
	xnfree(ptr);
}

#ifdef CONFIG_XENO_OPT_PERVASIVE
int rtdm_mmap_to_user(rtdm_user_info_t *user_info,
		      void *src_addr, size_t len,
		      int prot, void **pptr,
		      struct vm_operations_struct *vm_ops,
		      void *vm_private_data);
int rtdm_iomap_to_user(rtdm_user_info_t *user_info,
		       phys_addr_t src_addr, size_t len,
		       int prot, void **pptr,
		       struct vm_operations_struct *vm_ops,
		       void *vm_private_data);
int rtdm_munmap(rtdm_user_info_t *user_info, void *ptr, size_t len);

static inline int rtdm_read_user_ok(rtdm_user_info_t *user_info,
				    const void __user *ptr, size_t size)
{
	return access_rok(ptr, size);
}

static inline int rtdm_rw_user_ok(rtdm_user_info_t *user_info,
				  const void __user *ptr, size_t size)
{
	return access_wok(ptr, size);
}

static inline int rtdm_copy_from_user(rtdm_user_info_t *user_info,
				      void *dst, const void __user *src,
				      size_t size)
{
	return __xn_copy_from_user(dst, src, size) ? -EFAULT : 0;
}

static inline int rtdm_safe_copy_from_user(rtdm_user_info_t *user_info,
					   void *dst, const void __user *src,
					   size_t size)
{
	return (!access_rok(src, size) ||
		__xn_copy_from_user(dst, src, size)) ? -EFAULT : 0;
}

static inline int rtdm_copy_to_user(rtdm_user_info_t *user_info,
				    void __user *dst, const void *src,
				    size_t size)
{
	return __xn_copy_to_user(dst, src, size) ? -EFAULT : 0;
}

static inline int rtdm_safe_copy_to_user(rtdm_user_info_t *user_info,
					 void __user *dst, const void *src,
					 size_t size)
{
	return (!access_wok(dst, size) ||
		__xn_copy_to_user(dst, src, size)) ? -EFAULT : 0;
}

static inline int rtdm_strncpy_from_user(rtdm_user_info_t *user_info,
					 char *dst,
					 const char __user *src, size_t count)
{
	if (unlikely(!access_rok(src, 1)))
		return -EFAULT;
	return __xn_strncpy_from_user(dst, src, count);
}
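/*
 * Usage sketch (illustration only): an IOCTL handler copying a configuration
 * structure from user space. "struct mydrv_config", "struct mydrv_context",
 * MYDRV_IOC_SETCFG and "mydrv_apply_config" are hypothetical; a NULL
 * user_info indicates a kernel-originated call, so the argument is accessed
 * directly in that case.
 *
 *	static int mydrv_ioctl(struct rtdm_dev_context *context,
 *			       rtdm_user_info_t *user_info,
 *			       unsigned int request, void __user *arg)
 *	{
 *		struct mydrv_context *ctx = rtdm_context_to_private(context);
 *		struct mydrv_config cfg;
 *
 *		switch (request) {
 *		case MYDRV_IOC_SETCFG:
 *			if (user_info) {
 *				if (rtdm_safe_copy_from_user(user_info, &cfg,
 *							     arg, sizeof(cfg)))
 *					return -EFAULT;
 *			} else
 *				memcpy(&cfg, arg, sizeof(cfg));
 *			return mydrv_apply_config(ctx, &cfg);
 *		default:
 *			return -ENOTTY;
 *		}
 *	}
 */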

static inline int rtdm_rt_capable(rtdm_user_info_t *user_info)
{
	XENO_ASSERT(RTDM, !xnpod_asynch_p(), return 0;);

	return (user_info ? xnshadow_thread(user_info) != NULL
			  : !xnpod_root_p());
}

#else /* !CONFIG_XENO_OPT_PERVASIVE */

#define rtdm_mmap_to_user(...)		({ -ENOSYS; })
#define rtdm_munmap(...)		({ -ENOSYS; })
#define rtdm_read_user_ok(...)		({ 0; })
#define rtdm_rw_user_ok(...)		({ 0; })
#define rtdm_copy_from_user(...)	({ -ENOSYS; })
#define rtdm_safe_copy_from_user(...)	({ -ENOSYS; })
#define rtdm_copy_to_user(...)		({ -ENOSYS; })
#define rtdm_safe_copy_to_user(...)	({ -ENOSYS; })
#define rtdm_strncpy_from_user(...)	({ -ENOSYS; })

static inline int rtdm_rt_capable(rtdm_user_info_t *user_info)
{
	XENO_ASSERT(RTDM, !xnpod_asynch_p(), return 0;);

	return !xnpod_root_p();
}

#endif /* CONFIG_XENO_OPT_PERVASIVE */

static inline int rtdm_in_rt_context(void)
{
	return (rthal_current_domain != rthal_root_domain);
}

#endif /* !DOXYGEN_CPP */

int rtdm_exec_in_rt(struct rtdm_dev_context *context,
		    rtdm_user_info_t *user_info, void *arg,
		    rtdm_rt_handler_t handler);

#endif /* _RTDM_DRIVER_H */