/*
 * tclNotify.c --
 *
 *	This file implements the generic portion of the Tcl notifier.
 *	The notifier is the lowest-level part of the event system.  It
 *	manages an event queue that holds Tcl_Event structures.  The
 *	platform specific portion of the notifier is defined in the
 *	tcl*Notify.c files in each platform directory.
 *
 * Copyright (c) 1995-1997 Sun Microsystems, Inc.
 * Copyright (c) 1998 by Scriptics Corporation.
 * Copyright (c) 2003 by Kevin B. Kenny.  All rights reserved.
 * Portions Copyright (c) 2007-2008 Nokia Corporation and/or its subsidiaries. All rights reserved.
 *
 * See the file "license.terms" for information on usage and redistribution
 * of this file, and for a DISCLAIMER OF ALL WARRANTIES.
 *
 * RCS: @(#) $Id: tclNotify.c,v 1.11.2.2 2005/04/26 00:46:02 das Exp $
 */

#include "tclInt.h"
#include "tclPort.h"
#if defined(__SYMBIAN32__) && defined(__WINSCW__)
#include "tclSymbianGlobals.h"
#define dataKey getdataKey(5)
#endif

extern TclStubs tclStubs;

/*
 * For each event source (created with Tcl_CreateEventSource) there
 * is a structure of the following type:
 */

typedef struct EventSource {
    Tcl_EventSetupProc *setupProc;
    Tcl_EventCheckProc *checkProc;
    ClientData clientData;
    struct EventSource *nextPtr;
} EventSource;

/*
 * The following structure keeps track of the state of the notifier on a
 * per-thread basis.  The first three elements keep track of the event queue.
 * In addition to the first (next to be serviced) and last events in the queue,
 * we keep track of a "marker" event.  This provides a simple priority
 * mechanism whereby events can be inserted at the front of the queue but
 * behind all other high-priority events already in the queue (this is used for
 * things like a sequence of Enter and Leave events generated during a grab in
 * Tk).  These elements are protected by the queueMutex so that any thread
 * can queue an event on any notifier.  Note that all of the values in this
 * structure will be initialized to 0.
 */

typedef struct ThreadSpecificData {
    Tcl_Event *firstEventPtr;		/* First pending event, or NULL if none. */
    Tcl_Event *lastEventPtr;		/* Last pending event, or NULL if none. */
    Tcl_Event *markerEventPtr;		/* Last high-priority event in queue, or
					 * NULL if none. */
    Tcl_Mutex queueMutex;		/* Mutex to protect access to the previous
					 * three fields. */
    int serviceMode;			/* One of TCL_SERVICE_NONE or
					 * TCL_SERVICE_ALL. */
    int blockTimeSet;			/* 0 means there is no maximum block
					 * time: block forever. */
    Tcl_Time blockTime;			/* If blockTimeSet is 1, gives the
					 * maximum elapsed time for the next block. */
    int inTraversal;			/* 1 if Tcl_SetMaxBlockTime is being
					 * called during an event source traversal. */
    EventSource *firstEventSourcePtr;
					/* Pointer to first event source in
					 * list of event sources for this thread. */
    Tcl_ThreadId threadId;		/* Thread that owns this notifier instance. */
    ClientData clientData;		/* Opaque handle for platform specific
					 * notifier. */
    int initialized;			/* 1 if notifier has been initialized. */
    struct ThreadSpecificData *nextPtr;
					/* Next notifier in global list of notifiers.
					 * Access is controlled by the listLock global
					 * mutex. */
} ThreadSpecificData;

#if !defined(__SYMBIAN32__) || !defined(__WINSCW__)
static Tcl_ThreadDataKey dataKey;

/*
 * Global list of notifiers.  Access to this list is controlled by the
 * listLock mutex.  If this becomes a performance bottleneck, this could
 * be replaced with a hashtable.
 */

static ThreadSpecificData *firstNotifierPtr;
#else
#define firstNotifierPtr (*(ThreadSpecificData**)get_firstNotifierPtr())
#endif
TCL_DECLARE_MUTEX(listLock)

/*
 * Declarations for routines used only in this file.
 */

static void		QueueEvent _ANSI_ARGS_((ThreadSpecificData *tsdPtr,
			    Tcl_Event* evPtr, Tcl_QueuePosition position));

/*
 *----------------------------------------------------------------------
 *
 * TclInitNotifier --
 *
 *	Initialize the thread local data structures for the notifier
 *	subsystem.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	Adds the current thread to the global list of notifiers.
 *
 *----------------------------------------------------------------------
 */

void
TclInitNotifier()
{
    ThreadSpecificData *tsdPtr = TCL_TSD_INIT(&dataKey);

    Tcl_MutexLock(&listLock);

    tsdPtr->threadId = Tcl_GetCurrentThread();
    tsdPtr->clientData = tclStubs.tcl_InitNotifier();
    tsdPtr->initialized = 1;
    tsdPtr->nextPtr = firstNotifierPtr;
    firstNotifierPtr = tsdPtr;

    Tcl_MutexUnlock(&listLock);
}

/*
 *----------------------------------------------------------------------
 *
 * TclFinalizeNotifier --
 *
 *	Finalize the thread local data structures for the notifier
 *	subsystem.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	Removes the notifier associated with the current thread from
 *	the global notifier list.  This is done only if the notifier
 *	was initialized for this thread by a call to TclInitNotifier().
 *	This is always true for threads which have been seeded with a
 *	Tcl interpreter, since the call to Tcl_CreateInterp will, among
 *	other things, call TclInitializeSubsystems(), which will in
 *	turn call TclInitNotifier() for the thread.  For threads
 *	created without a Tcl interpreter, though, nobody calls
 *	TclInitNotifier either explicitly or implicitly, hence
 *	TclFinalizeNotifier should not be performed at all.
 *
 *----------------------------------------------------------------------
 */

void
TclFinalizeNotifier()
{
    ThreadSpecificData *tsdPtr = TCL_TSD_INIT(&dataKey);
    ThreadSpecificData **prevPtrPtr;
    Tcl_Event *evPtr, *hold;

    if (!tsdPtr->initialized) {
        return; /* Notifier not initialized for the current thread. */
    }

    Tcl_MutexLock(&(tsdPtr->queueMutex));
    for (evPtr = tsdPtr->firstEventPtr; evPtr != (Tcl_Event *) NULL; ) {
        hold = evPtr;
        evPtr = evPtr->nextPtr;
        ckfree((char *) hold);
    }
    tsdPtr->firstEventPtr = NULL;
    tsdPtr->lastEventPtr = NULL;
    Tcl_MutexUnlock(&(tsdPtr->queueMutex));

    Tcl_MutexLock(&listLock);

    if (tclStubs.tcl_FinalizeNotifier) {
        tclStubs.tcl_FinalizeNotifier(tsdPtr->clientData);
    }
    Tcl_MutexFinalize(&(tsdPtr->queueMutex));
    for (prevPtrPtr = &firstNotifierPtr; *prevPtrPtr != NULL;
            prevPtrPtr = &((*prevPtrPtr)->nextPtr)) {
        if (*prevPtrPtr == tsdPtr) {
            *prevPtrPtr = tsdPtr->nextPtr;
            break;
        }
    }
    tsdPtr->initialized = 0;

    Tcl_MutexUnlock(&listLock);
}

/*
 *----------------------------------------------------------------------
 *
 * Tcl_SetNotifier --
 *
 *	Install a set of alternate functions for use with the notifier.
 *	In particular, this can be used to install the Xt-based
 *	notifier for use with the Browser plugin.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	Overstomps part of the stub vector.  This relies on hooks
 *	added to the default procedures in case those are called
 *	directly (i.e., not through the stub table).
 *
 *----------------------------------------------------------------------
 */

EXPORT_C void
Tcl_SetNotifier(notifierProcPtr)
    Tcl_NotifierProcs *notifierProcPtr;
{
#if !defined(__WIN32__) && !defined(MAC_TCL) /* UNIX */
    tclStubs.tcl_CreateFileHandler = notifierProcPtr->createFileHandlerProc;
    tclStubs.tcl_DeleteFileHandler = notifierProcPtr->deleteFileHandlerProc;
#endif
    tclStubs.tcl_SetTimer = notifierProcPtr->setTimerProc;
    tclStubs.tcl_WaitForEvent = notifierProcPtr->waitForEventProc;
    tclStubs.tcl_InitNotifier = notifierProcPtr->initNotifierProc;
    tclStubs.tcl_FinalizeNotifier = notifierProcPtr->finalizeNotifierProc;
    tclStubs.tcl_AlertNotifier = notifierProcPtr->alertNotifierProc;
    tclStubs.tcl_ServiceModeHook = notifierProcPtr->serviceModeHookProc;
}

/*
 *----------------------------------------------------------------------
 *
 * Tcl_CreateEventSource --
 *
 *	This procedure is invoked to create a new source of events.
 *	The source is identified by a procedure that gets invoked
 *	during Tcl_DoOneEvent to check for events on that source
 *	and queue them.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	SetupProc and checkProc will be invoked each time that Tcl_DoOneEvent
 *	runs out of things to do.  SetupProc will be invoked before
 *	Tcl_DoOneEvent calls select or whatever else it uses to wait
 *	for events.  SetupProc typically calls functions like
 *	Tcl_SetMaxBlockTime to indicate what to wait for.
 *
 *	CheckProc is called after select or whatever operation was actually
 *	used to wait.  It figures out whether anything interesting actually
 *	happened (e.g. by calling Tcl_AsyncReady), and then calls
 *	Tcl_QueueEvent to queue any events that are ready.
 *
 *	Each of these procedures is passed two arguments, e.g.
 *		(*checkProc)(ClientData clientData, int flags);
 *	ClientData is the same as the clientData argument here, and flags
 *	is a combination of things like TCL_FILE_EVENTS that indicates
 *	what events are of interest:  setupProc and checkProc use flags
 *	to figure out whether their events are relevant or not.
 *
 *----------------------------------------------------------------------
 */

EXPORT_C void
Tcl_CreateEventSource(setupProc, checkProc, clientData)
    Tcl_EventSetupProc *setupProc;	/* Procedure to invoke to figure out
					 * what to wait for. */
    Tcl_EventCheckProc *checkProc;	/* Procedure to call after waiting
					 * to see what happened. */
    ClientData clientData;		/* One-word argument to pass to
					 * setupProc and checkProc. */
{
    ThreadSpecificData *tsdPtr = TCL_TSD_INIT(&dataKey);
    EventSource *sourcePtr = (EventSource *) ckalloc(sizeof(EventSource));

    sourcePtr->setupProc = setupProc;
    sourcePtr->checkProc = checkProc;
    sourcePtr->clientData = clientData;
    sourcePtr->nextPtr = tsdPtr->firstEventSourcePtr;
    tsdPtr->firstEventSourcePtr = sourcePtr;
}
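
/*
 * Illustrative sketch (not part of the original sources): a minimal event
 * source built on Tcl_CreateEventSource.  The MySource* names, the
 * mySourcePending flag, and the choice of TCL_FILE_EVENTS are assumptions
 * made for the example only; a real source would test a device, socket, or
 * other condition in its check procedure.
 */
#if 0
typedef struct MySourceEvent {
    Tcl_Event header;		/* Must be the first field: the event queue
				 * treats the whole record as a Tcl_Event. */
    int payload;		/* Example per-event data. */
} MySourceEvent;

static int mySourcePending = 0;	/* Set elsewhere when work is available. */

static int
MySourceEventProc(evPtr, flags)
    Tcl_Event *evPtr;
    int flags;
{
    if (!(flags & TCL_FILE_EVENTS)) {
	return 0;		/* Wrong kind of service pass: stay queued. */
    }
    /* ... act on ((MySourceEvent *) evPtr)->payload here ... */
    return 1;			/* Handled: Tcl_ServiceEvent frees the event. */
}

static void
MySourceSetupProc(clientData, flags)
    ClientData clientData;
    int flags;
{
    Tcl_Time pollTime = { 0, 100000 };	/* Poll at least every 100 ms. */

    if (flags & TCL_FILE_EVENTS) {
	Tcl_SetMaxBlockTime(&pollTime);
    }
}

static void
MySourceCheckProc(clientData, flags)
    ClientData clientData;
    int flags;
{
    MySourceEvent *newPtr;

    if ((flags & TCL_FILE_EVENTS) && mySourcePending) {
	mySourcePending = 0;
	newPtr = (MySourceEvent *) ckalloc(sizeof(MySourceEvent));
	newPtr->header.proc = MySourceEventProc;
	newPtr->payload = 42;
	Tcl_QueueEvent((Tcl_Event *) newPtr, TCL_QUEUE_TAIL);
    }
}

/* Registration, e.g. from an extension's init routine: */
/* Tcl_CreateEventSource(MySourceSetupProc, MySourceCheckProc, NULL); */
#endif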

/*
 *----------------------------------------------------------------------
 *
 * Tcl_DeleteEventSource --
 *
 *	This procedure is invoked to delete the source of events
 *	given by proc and clientData.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	The given event source is cancelled, so its procedure will
 *	never again be called.  If no such source exists, nothing
 *	happens.
 *
 *----------------------------------------------------------------------
 */

EXPORT_C void
Tcl_DeleteEventSource(setupProc, checkProc, clientData)
    Tcl_EventSetupProc *setupProc;	/* Procedure to invoke to figure out
					 * what to wait for. */
    Tcl_EventCheckProc *checkProc;	/* Procedure to call after waiting
					 * to see what happened. */
    ClientData clientData;		/* One-word argument to pass to
					 * setupProc and checkProc. */
{
    ThreadSpecificData *tsdPtr = TCL_TSD_INIT(&dataKey);
    EventSource *sourcePtr, *prevPtr;

    for (sourcePtr = tsdPtr->firstEventSourcePtr, prevPtr = NULL;
            sourcePtr != NULL;
            prevPtr = sourcePtr, sourcePtr = sourcePtr->nextPtr) {
        if ((sourcePtr->setupProc != setupProc)
                || (sourcePtr->checkProc != checkProc)
                || (sourcePtr->clientData != clientData)) {
            continue;
        }
        if (prevPtr == NULL) {
            tsdPtr->firstEventSourcePtr = sourcePtr->nextPtr;
        } else {
            prevPtr->nextPtr = sourcePtr->nextPtr;
        }
        ckfree((char *) sourcePtr);
        return;
    }
}

/*
 *----------------------------------------------------------------------
 *
 * Tcl_QueueEvent --
 *
 *	Queue an event on the event queue associated with the
 *	current thread.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------
 */

EXPORT_C void
Tcl_QueueEvent(evPtr, position)
    Tcl_Event* evPtr;		/* Event to add to queue.  The storage
				 * space must have been allocated by the
				 * caller with malloc (ckalloc), and it
				 * becomes the property of the event queue.
				 * It will be freed after the event has
				 * been handled. */
    Tcl_QueuePosition position;	/* One of TCL_QUEUE_TAIL, TCL_QUEUE_HEAD,
				 * TCL_QUEUE_MARK. */
{
    ThreadSpecificData *tsdPtr = TCL_TSD_INIT(&dataKey);
    QueueEvent(tsdPtr, evPtr, position);
}
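
/*
 * Illustrative sketch (not part of the original sources): queueing a bare
 * event at a chosen position.  PlainEventProc is a hypothetical handler of
 * type Tcl_EventProc assumed to exist elsewhere.
 */
#if 0
extern int PlainEventProc(Tcl_Event *evPtr, int flags);

static void
QueuePlainEvent(position)
    Tcl_QueuePosition position;
{
    /* The queue owns the storage once queued; it is ckfree'd after handling. */
    Tcl_Event *evPtr = (Tcl_Event *) ckalloc(sizeof(Tcl_Event));

    evPtr->proc = PlainEventProc;
    Tcl_QueueEvent(evPtr, position);	/* TCL_QUEUE_TAIL: normal FIFO order.
					 * TCL_QUEUE_HEAD: ahead of everything.
					 * TCL_QUEUE_MARK: behind earlier
					 * high-priority events but ahead of
					 * the rest of the queue. */
}
#endif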

/*
 *----------------------------------------------------------------------
 *
 * Tcl_ThreadQueueEvent --
 *
 *	Queue an event on the specified thread's event queue.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------
 */

EXPORT_C void
Tcl_ThreadQueueEvent(threadId, evPtr, position)
    Tcl_ThreadId threadId;	/* Identifier for thread to use. */
    Tcl_Event* evPtr;		/* Event to add to queue.  The storage
				 * space must have been allocated by the
				 * caller with malloc (ckalloc), and it
				 * becomes the property of the event queue.
				 * It will be freed after the event has
				 * been handled. */
    Tcl_QueuePosition position;	/* One of TCL_QUEUE_TAIL, TCL_QUEUE_HEAD,
				 * TCL_QUEUE_MARK. */
{
    ThreadSpecificData *tsdPtr;

    /*
     * Find the notifier associated with the specified thread.
     */

    Tcl_MutexLock(&listLock);
    for (tsdPtr = firstNotifierPtr; tsdPtr && tsdPtr->threadId != threadId;
            tsdPtr = tsdPtr->nextPtr) {
        /* Empty loop body. */
    }

    /*
     * Queue the event if there was a notifier associated with the thread.
     */

    if (tsdPtr) {
        QueueEvent(tsdPtr, evPtr, position);
    }
    Tcl_MutexUnlock(&listLock);
}
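
/*
 * Illustrative sketch (not part of the original sources): posting an event
 * to another thread and then waking that thread's notifier with
 * Tcl_ThreadAlert (defined later in this file).  The target Tcl_ThreadId
 * and the WakeupEventProc handler are assumptions for the example.
 */
#if 0
extern int WakeupEventProc(Tcl_Event *evPtr, int flags);

static void
PostWakeupToThread(targetThread)
    Tcl_ThreadId targetThread;
{
    Tcl_Event *evPtr = (Tcl_Event *) ckalloc(sizeof(Tcl_Event));

    evPtr->proc = WakeupEventProc;
    Tcl_ThreadQueueEvent(targetThread, evPtr, TCL_QUEUE_TAIL);
    Tcl_ThreadAlert(targetThread);	/* Wake the target's notifier so the
					 * queued event is noticed promptly. */
}
#endif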

/*
 *----------------------------------------------------------------------
 *
 * QueueEvent --
 *
 *	Insert an event into the specified thread's event queue at one
 *	of three positions: the head, the tail, or before a floating
 *	marker.  Events inserted before the marker will be processed in
 *	first-in-first-out order, but before any events inserted at
 *	the tail of the queue.  Events inserted at the head of the
 *	queue will be processed in last-in-first-out order.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------
 */

static void
QueueEvent(tsdPtr, evPtr, position)
    ThreadSpecificData *tsdPtr;	/* Handle to thread local data that indicates
				 * which event queue to use. */
    Tcl_Event* evPtr;		/* Event to add to queue.  The storage
				 * space must have been allocated by the
				 * caller with malloc (ckalloc), and it
				 * becomes the property of the event queue.
				 * It will be freed after the event has
				 * been handled. */
    Tcl_QueuePosition position;	/* One of TCL_QUEUE_TAIL, TCL_QUEUE_HEAD,
				 * TCL_QUEUE_MARK. */
{
    Tcl_MutexLock(&(tsdPtr->queueMutex));
    if (position == TCL_QUEUE_TAIL) {
        /*
         * Append the event on the end of the queue.
         */

        evPtr->nextPtr = NULL;
        if (tsdPtr->firstEventPtr == NULL) {
            tsdPtr->firstEventPtr = evPtr;
        } else {
            tsdPtr->lastEventPtr->nextPtr = evPtr;
        }
        tsdPtr->lastEventPtr = evPtr;
    } else if (position == TCL_QUEUE_HEAD) {
        /*
         * Push the event on the head of the queue.
         */

        evPtr->nextPtr = tsdPtr->firstEventPtr;
        if (tsdPtr->firstEventPtr == NULL) {
            tsdPtr->lastEventPtr = evPtr;
        }
        tsdPtr->firstEventPtr = evPtr;
    } else if (position == TCL_QUEUE_MARK) {
        /*
         * Insert the event after the current marker event and advance
         * the marker to the new event.
         */

        if (tsdPtr->markerEventPtr == NULL) {
            evPtr->nextPtr = tsdPtr->firstEventPtr;
            tsdPtr->firstEventPtr = evPtr;
        } else {
            evPtr->nextPtr = tsdPtr->markerEventPtr->nextPtr;
            tsdPtr->markerEventPtr->nextPtr = evPtr;
        }
        tsdPtr->markerEventPtr = evPtr;
        if (evPtr->nextPtr == NULL) {
            tsdPtr->lastEventPtr = evPtr;
        }
    }
    Tcl_MutexUnlock(&(tsdPtr->queueMutex));
}

/*
 *----------------------------------------------------------------------
 *
 * Tcl_DeleteEvents --
 *
 *	Calls a procedure for each event in the queue and deletes those
 *	for which the procedure returns 1.  Events for which the
 *	procedure returns 0 are left in the queue.  Operates on the
 *	queue associated with the current thread.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	Potentially removes one or more events from the event queue.
 *
 *----------------------------------------------------------------------
 */

EXPORT_C void
Tcl_DeleteEvents(proc, clientData)
    Tcl_EventDeleteProc *proc;	/* The procedure to call. */
    ClientData clientData;	/* Type-specific data. */
{
    Tcl_Event *evPtr, *prevPtr, *hold;
    ThreadSpecificData *tsdPtr = TCL_TSD_INIT(&dataKey);

    Tcl_MutexLock(&(tsdPtr->queueMutex));
    for (prevPtr = (Tcl_Event *) NULL, evPtr = tsdPtr->firstEventPtr;
            evPtr != (Tcl_Event *) NULL; ) {
        if ((*proc)(evPtr, clientData) == 1) {
            if (tsdPtr->firstEventPtr == evPtr) {
                tsdPtr->firstEventPtr = evPtr->nextPtr;
            } else {
                prevPtr->nextPtr = evPtr->nextPtr;
            }
            if (evPtr->nextPtr == (Tcl_Event *) NULL) {
                tsdPtr->lastEventPtr = prevPtr;
            }
            if (tsdPtr->markerEventPtr == evPtr) {
                tsdPtr->markerEventPtr = prevPtr;
            }
            hold = evPtr;
            evPtr = evPtr->nextPtr;
            ckfree((char *) hold);
        } else {
            prevPtr = evPtr;
            evPtr = evPtr->nextPtr;
        }
    }
    Tcl_MutexUnlock(&(tsdPtr->queueMutex));
}
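
/*
 * Illustrative sketch (not part of the original sources): a delete
 * procedure for Tcl_DeleteEvents that removes every pending event whose
 * handler is a particular procedure.  TargetEventProc is a hypothetical
 * Tcl_EventProc used only as the match criterion.
 */
#if 0
extern int TargetEventProc(Tcl_Event *evPtr, int flags);

static int
RemoveTargetEvents(evPtr, clientData)
    Tcl_Event *evPtr;
    ClientData clientData;
{
    /* Return 1 to have Tcl_DeleteEvents unlink and free this event. */
    return (evPtr->proc == TargetEventProc) ? 1 : 0;
}

/* Typical call: Tcl_DeleteEvents(RemoveTargetEvents, NULL); */
#endif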

/*
 *----------------------------------------------------------------------
 *
 * Tcl_ServiceEvent --
 *
 *	Process one event from the event queue, or invoke an
 *	asynchronous event handler.  Operates on the event queue for
 *	the current thread.
 *
 * Results:
 *	The return value is 1 if the procedure actually found an event
 *	to process.  If no processing occurred, then 0 is returned.
 *
 * Side effects:
 *	Invokes all of the event handlers for the highest priority
 *	event in the event queue.  May collapse some events into a
 *	single event or discard stale events.
 *
 *----------------------------------------------------------------------
 */

EXPORT_C int
Tcl_ServiceEvent(flags)
    int flags;			/* Indicates what events should be processed.
				 * May be any combination of TCL_WINDOW_EVENTS,
				 * TCL_FILE_EVENTS, TCL_TIMER_EVENTS, or other
				 * flags defined elsewhere.  Events not
				 * matching this will be skipped for processing
				 * later. */
{
    Tcl_Event *evPtr, *prevPtr;
    Tcl_EventProc *proc;
    int result;
    ThreadSpecificData *tsdPtr = TCL_TSD_INIT(&dataKey);

    /*
     * Asynchronous event handlers are considered to be the highest
     * priority events, and so must be invoked before we process events
     * on the event queue.
     */

    if (Tcl_AsyncReady()) {
        (void) Tcl_AsyncInvoke((Tcl_Interp *) NULL, 0);
        return 1;
    }

    /*
     * No event flags is equivalent to TCL_ALL_EVENTS.
     */

    if ((flags & TCL_ALL_EVENTS) == 0) {
        flags |= TCL_ALL_EVENTS;
    }

    /*
     * Loop through all the events in the queue until we find one
     * that can actually be handled.
     */

    Tcl_MutexLock(&(tsdPtr->queueMutex));
    for (evPtr = tsdPtr->firstEventPtr; evPtr != NULL;
            evPtr = evPtr->nextPtr) {
        /*
         * Call the handler for the event.  If it actually handles the
         * event then free the storage for the event.  There are two
         * tricky things here, both stemming from the fact that the event
         * code may be re-entered while servicing the event:
         *
         * 1. Set the "proc" field to NULL.  This is a signal to ourselves
         *    that we shouldn't reexecute the handler if the event loop
         *    is re-entered.
         * 2. When freeing the event, must search the queue again from the
         *    front to find it.  This is because the event queue could
         *    change almost arbitrarily while handling the event, so we
         *    can't depend on pointers found now still being valid when
         *    the handler returns.
         */

        proc = evPtr->proc;
        if (proc == NULL) {
            continue;
        }
        evPtr->proc = NULL;

        /*
         * Release the lock before calling the event procedure.  This
         * allows other threads to post events if we enter a recursive
         * event loop in this thread.  Note that we are making the assumption
         * that if the proc returns 0, the event is still in the list.
         */

        Tcl_MutexUnlock(&(tsdPtr->queueMutex));
        result = (*proc)(evPtr, flags);
        Tcl_MutexLock(&(tsdPtr->queueMutex));

        if (result) {
            /*
             * The event was processed, so remove it from the queue.
             */

            if (tsdPtr->firstEventPtr == evPtr) {
                tsdPtr->firstEventPtr = evPtr->nextPtr;
                if (evPtr->nextPtr == NULL) {
                    tsdPtr->lastEventPtr = NULL;
                }
                if (tsdPtr->markerEventPtr == evPtr) {
                    tsdPtr->markerEventPtr = NULL;
                }
            } else {
                for (prevPtr = tsdPtr->firstEventPtr;
                        prevPtr && prevPtr->nextPtr != evPtr;
                        prevPtr = prevPtr->nextPtr) {
                    /* Empty loop body. */
                }
                if (prevPtr) {
                    prevPtr->nextPtr = evPtr->nextPtr;
                    if (evPtr->nextPtr == NULL) {
                        tsdPtr->lastEventPtr = prevPtr;
                    }
                    if (tsdPtr->markerEventPtr == evPtr) {
                        tsdPtr->markerEventPtr = prevPtr;
                    }
                } else {
                    evPtr = NULL;
                }
            }
            if (evPtr) {
                ckfree((char *) evPtr);
            }
            Tcl_MutexUnlock(&(tsdPtr->queueMutex));
            return 1;
        } else {
            /*
             * The event wasn't actually handled, so we have to restore
             * the proc field to allow the event to be attempted again.
             */

            evPtr->proc = proc;
        }
    }
    Tcl_MutexUnlock(&(tsdPtr->queueMutex));
    return 0;
}

/*
 *----------------------------------------------------------------------
 *
 * Tcl_GetServiceMode --
 *
 *	This routine returns the current service mode of the notifier.
 *
 * Results:
 *	Returns either TCL_SERVICE_ALL or TCL_SERVICE_NONE.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------
 */

EXPORT_C int
Tcl_GetServiceMode()
{
    ThreadSpecificData *tsdPtr = TCL_TSD_INIT(&dataKey);

    return tsdPtr->serviceMode;
}

/*
 *----------------------------------------------------------------------
 *
 * Tcl_SetServiceMode --
 *
 *	This routine sets the current service mode of the notifier.
 *
 * Results:
 *	Returns the previous service mode.
 *
 * Side effects:
 *	Invokes the notifier service mode hook procedure.
 *
 *----------------------------------------------------------------------
 */

EXPORT_C int
Tcl_SetServiceMode(mode)
    int mode;			/* New service mode: TCL_SERVICE_ALL or
				 * TCL_SERVICE_NONE. */
{
    int oldMode;
    ThreadSpecificData *tsdPtr = TCL_TSD_INIT(&dataKey);

    oldMode = tsdPtr->serviceMode;
    tsdPtr->serviceMode = mode;
    if (tclStubs.tcl_ServiceModeHook) {
        tclStubs.tcl_ServiceModeHook(mode);
    }
    return oldMode;
}

/*
 *----------------------------------------------------------------------
 *
 * Tcl_SetMaxBlockTime --
 *
 *	This procedure is invoked by event sources to tell the notifier
 *	how long it may block the next time it blocks.  The timePtr
 *	argument gives a maximum time; the actual time may be less if
 *	some other event source requested a smaller time.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	May reduce the length of the next sleep in the notifier.
 *
 *----------------------------------------------------------------------
 */

EXPORT_C void
Tcl_SetMaxBlockTime(timePtr)
    Tcl_Time *timePtr;		/* Specifies a maximum elapsed time for
				 * the next blocking operation in the
				 * event notifier. */
{
    ThreadSpecificData *tsdPtr = TCL_TSD_INIT(&dataKey);

    if (!tsdPtr->blockTimeSet || (timePtr->sec < tsdPtr->blockTime.sec)
            || ((timePtr->sec == tsdPtr->blockTime.sec)
            && (timePtr->usec < tsdPtr->blockTime.usec))) {
        tsdPtr->blockTime = *timePtr;
        tsdPtr->blockTimeSet = 1;
    }

    /*
     * If we are called outside an event source traversal, set the
     * timeout immediately.
     */

    if (!tsdPtr->inTraversal) {
        if (tsdPtr->blockTimeSet) {
            Tcl_SetTimer(&tsdPtr->blockTime);
        } else {
            Tcl_SetTimer(NULL);
        }
    }
}

/*
 *----------------------------------------------------------------------
 *
 * Tcl_DoOneEvent --
 *
 *	Process a single event of some sort.  If there's no work to
 *	do, wait for an event to occur, then process it.
 *
 * Results:
 *	The return value is 1 if the procedure actually found an event
 *	to process.  If no processing occurred, then 0 is returned (this
 *	can happen if the TCL_DONT_WAIT flag is set or if there are no
 *	event handlers to wait for in the set specified by flags).
 *
 * Side effects:
 *	May delay execution of the process while waiting for an event,
 *	unless TCL_DONT_WAIT is set in the flags argument.  Event
 *	sources are invoked to check for and queue events.  Event
 *	handlers may produce arbitrary side effects.
 *
 *----------------------------------------------------------------------
 */

EXPORT_C int
Tcl_DoOneEvent(flags)
    int flags;			/* Miscellaneous flag values: may be any
				 * combination of TCL_DONT_WAIT,
				 * TCL_WINDOW_EVENTS, TCL_FILE_EVENTS,
				 * TCL_TIMER_EVENTS, TCL_IDLE_EVENTS, or
				 * others defined by event sources. */
{
    int result = 0, oldMode;
    EventSource *sourcePtr;
    Tcl_Time *timePtr;
    ThreadSpecificData *tsdPtr = TCL_TSD_INIT(&dataKey);

    /*
     * The first thing we do is to service any asynchronous event
     * handlers.
     */

    if (Tcl_AsyncReady()) {
        (void) Tcl_AsyncInvoke((Tcl_Interp *) NULL, 0);
        return 1;
    }

    /*
     * No event flags is equivalent to TCL_ALL_EVENTS.
     */

    if ((flags & TCL_ALL_EVENTS) == 0) {
        flags |= TCL_ALL_EVENTS;
    }

    /*
     * Set the service mode to none so notifier event routines won't
     * try to service events recursively.
     */

    oldMode = tsdPtr->serviceMode;
    tsdPtr->serviceMode = TCL_SERVICE_NONE;

    /*
     * The core of this procedure is an infinite loop, even though
     * we only service one event.  The reason for this is that we
     * may be processing events that don't do anything inside of Tcl.
     */

    while (1) {
        /*
         * If idle events are the only things to service, skip the
         * main part of the loop and go directly to handle idle
         * events (i.e. don't wait even if TCL_DONT_WAIT isn't set).
         */

        if ((flags & TCL_ALL_EVENTS) == TCL_IDLE_EVENTS) {
            flags = TCL_IDLE_EVENTS|TCL_DONT_WAIT;
            goto idleEvents;
        }

        /*
         * Ask Tcl to service a queued event, if there are any.
         */

        if (Tcl_ServiceEvent(flags)) {
            result = 1;
            break;
        }

        /*
         * If TCL_DONT_WAIT is set, be sure to poll rather than
         * blocking, otherwise reset the block time to infinity.
         */

        if (flags & TCL_DONT_WAIT) {
            tsdPtr->blockTime.sec = 0;
            tsdPtr->blockTime.usec = 0;
            tsdPtr->blockTimeSet = 1;
        } else {
            tsdPtr->blockTimeSet = 0;
        }

        /*
         * Set up all the event sources for new events.  This will
         * cause the block time to be updated if necessary.
         */

        tsdPtr->inTraversal = 1;
        for (sourcePtr = tsdPtr->firstEventSourcePtr; sourcePtr != NULL;
                sourcePtr = sourcePtr->nextPtr) {
            if (sourcePtr->setupProc) {
                (sourcePtr->setupProc)(sourcePtr->clientData, flags);
            }
        }
        tsdPtr->inTraversal = 0;

        if ((flags & TCL_DONT_WAIT) || tsdPtr->blockTimeSet) {
            timePtr = &tsdPtr->blockTime;
        } else {
            timePtr = NULL;
        }

        /*
         * Wait for a new event or a timeout.  If Tcl_WaitForEvent
         * returns -1, we should abort Tcl_DoOneEvent.
         */

        result = Tcl_WaitForEvent(timePtr);
        if (result < 0) {
            result = 0;
            break;
        }

        /*
         * Check all the event sources for new events.
         */

        for (sourcePtr = tsdPtr->firstEventSourcePtr; sourcePtr != NULL;
                sourcePtr = sourcePtr->nextPtr) {
            if (sourcePtr->checkProc) {
                (sourcePtr->checkProc)(sourcePtr->clientData, flags);
            }
        }

        /*
         * Check for events queued by the notifier or event sources.
         */

        if (Tcl_ServiceEvent(flags)) {
            result = 1;
            break;
        }

        /*
         * We've tried everything at this point, but nobody we know
         * about had anything to do.  Check for idle events.  If none,
         * either quit or go back to the top and try again.
         */

        idleEvents:
        if (flags & TCL_IDLE_EVENTS) {
            if (TclServiceIdle()) {
                result = 1;
                break;
            }
        }
        if (flags & TCL_DONT_WAIT) {
            break;
        }

        /*
         * If Tcl_WaitForEvent has returned 1, indicating that one system
         * event has been dispatched (and thus that some Tcl code might have
         * been indirectly executed), we break out of the loop.  We do this
         * to give VwaitCmd, for instance, a chance to check if that system
         * event had the side effect of changing the variable (so the vwait
         * can return and unwind properly).
         *
         * NB: We will process idle events first, if there are any, because
         * otherwise we might never do the idle events if the notifier
         * always gets system events.
         */

        if (result) {
            break;
        }
    }

    tsdPtr->serviceMode = oldMode;
    return result;
}
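
/*
 * Illustrative sketch (not part of the original sources): a vwait-style
 * loop driven by Tcl_DoOneEvent.  The "done" flag is an assumption for the
 * example; it would be set by some event handler queued through the
 * machinery above.
 */
#if 0
static volatile int done = 0;

static void
RunUntilDone()
{
    while (!done) {
	Tcl_DoOneEvent(TCL_ALL_EVENTS);	/* Service one event, blocking in the
					 * platform notifier when nothing is
					 * ready. */
    }
}
#endif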

/*
 *----------------------------------------------------------------------
 *
 * Tcl_ServiceAll --
 *
 *	This routine checks all of the event sources, processes
 *	events that are on the Tcl event queue, and then calls any
 *	idle handlers.  Platform specific notifier callbacks that
 *	generate events should call this routine before returning to
 *	the system in order to ensure that Tcl gets a chance to
 *	process the new events.
 *
 * Results:
 *	Returns 1 if an event or idle handler was invoked, else 0.
 *
 * Side effects:
 *	Anything that an event or idle handler may do.
 *
 *----------------------------------------------------------------------
 */

EXPORT_C int
Tcl_ServiceAll()
{
    int result = 0;
    EventSource *sourcePtr;
    ThreadSpecificData *tsdPtr = TCL_TSD_INIT(&dataKey);

    if (tsdPtr->serviceMode == TCL_SERVICE_NONE) {
        return result;
    }

    /*
     * We need to turn off event servicing like we do in Tcl_DoOneEvent,
     * to avoid recursive calls.
     */

    tsdPtr->serviceMode = TCL_SERVICE_NONE;

    /*
     * Check async handlers first.
     */

    if (Tcl_AsyncReady()) {
        (void) Tcl_AsyncInvoke((Tcl_Interp *) NULL, 0);
    }

    /*
     * Make a single pass through all event sources, queued events,
     * and idle handlers.  Note that we wait to update the notifier
     * timer until the end so we can avoid multiple changes.
     */

    tsdPtr->inTraversal = 1;
    tsdPtr->blockTimeSet = 0;

    for (sourcePtr = tsdPtr->firstEventSourcePtr; sourcePtr != NULL;
            sourcePtr = sourcePtr->nextPtr) {
        if (sourcePtr->setupProc) {
            (sourcePtr->setupProc)(sourcePtr->clientData, TCL_ALL_EVENTS);
        }
    }
    for (sourcePtr = tsdPtr->firstEventSourcePtr; sourcePtr != NULL;
            sourcePtr = sourcePtr->nextPtr) {
        if (sourcePtr->checkProc) {
            (sourcePtr->checkProc)(sourcePtr->clientData, TCL_ALL_EVENTS);
        }
    }

    while (Tcl_ServiceEvent(0)) {
        result = 1;
    }
    if (TclServiceIdle()) {
        result = 1;
    }

    if (!tsdPtr->blockTimeSet) {
        Tcl_SetTimer(NULL);
    } else {
        Tcl_SetTimer(&tsdPtr->blockTime);
    }
    tsdPtr->inTraversal = 0;
    tsdPtr->serviceMode = TCL_SERVICE_ALL;
    return result;
}

/*
 *----------------------------------------------------------------------
 *
 * Tcl_ThreadAlert --
 *
 *	This function wakes up the notifier associated with the
 *	specified thread (if there is one).
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------
 */

EXPORT_C void
Tcl_ThreadAlert(threadId)
    Tcl_ThreadId threadId;	/* Identifier for thread to use. */
{
    ThreadSpecificData *tsdPtr;

    /*
     * Find the notifier associated with the specified thread.
     * Note that we need to hold the listLock while calling
     * Tcl_AlertNotifier to avoid a race condition where
     * the specified thread might destroy its notifier.
     */

    Tcl_MutexLock(&listLock);
    for (tsdPtr = firstNotifierPtr; tsdPtr; tsdPtr = tsdPtr->nextPtr) {
        if (tsdPtr->threadId == threadId) {
            if (tclStubs.tcl_AlertNotifier) {
                tclStubs.tcl_AlertNotifier(tsdPtr->clientData);
            }
            break;
        }
    }
    Tcl_MutexUnlock(&listLock);
}