Re: Python/thread.c patch for POSIX pthreads

Tim Peters (tim@ksr.com)
Sat, 07 May 94 23:23:36 -0400

> [tim]
> ... The attached uses a busy loop on pthread_mutex_trylock instead,
> which sucks ... [but] I don't personally care how wasteful the busy
> loop is anyway.

Ya, right. The attached replacement patch does it closer to the right
way. A happy consequence is that the following pretty Generator demo
prints

2 3 5 7 11 13 17 19 23 29 31 37 41 43 47 53 59 61 67 71 73 79 83 89 97
101 103 107 109 113

about 500x faster <wink>.

# intsFrom(n) returns generator g s.t. g.get() delivers n, n+1, n+2, ...
def _intsFrom(gout, n):
    deliver = gout.put
    while 1:
        deliver(n)
        n = n + 1

def intsFrom(n): return Generator( _intsFrom, (n,) )

# nuke_multiples(gin, n) returns generator g s.t. g.get() delivers
# the elements of gin that aren't divisible by n
def _nuke_multiples(gout, gin, n):
    deliver, get = gout.put, gin.get
    while 1:
        next = get()
        if next % n:
            deliver(next)

def nuke_multiples(gin, n): return Generator( _nuke_multiples, (gin,n) )

def sieve(gout, gin):
    deliver = gout.put
    while 1:
        prime = gin.get()
        deliver(prime)
        gin = nuke_multiples(gin, prime)

primes = Generator( sieve, (intsFrom(2),) )

for i in range(30):
    print primes.get(),
print
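
(The Generator class the demo leans on isn't part of the patch and isn't
shown above.  Purely for illustration, here's a rough sketch of how such a
class can be built on a pair of the locks the patch implements -- the
thread-module calls (allocate_lock, start_new_thread) are the standard
ones, but the rest is a guess reconstructed to fit the demo's
Generator/put/get usage, not necessarily the class the demo was run with.)

import thread

class Generator:
    # one producer thread per generator; put() and get() hand values
    # across via two locks that both start out acquired
    def __init__(self, func, args):
        self.getlock = thread.allocate_lock()
        self.putlock = thread.allocate_lock()
        self.getlock.acquire()          # released by put()
        self.putlock.acquire()          # released by get()
        thread.start_new_thread(self._start, (func, args))

    def _start(self, func, args):
        # wait for the first get() before running the producer
        self.putlock.acquire()
        apply(func, (self,) + args)

    def put(self, value):
        # producer side: publish a value, then wait to be asked again
        self.value = value
        self.getlock.release()
        self.putlock.acquire()

    def get(self):
        # consumer side: ask the producer for a value, wait for it
        self.putlock.release()
        self.getlock.acquire()
        return self.value

The property the patch has to supply is visible here: each lock is
acquired in one thread and released in the other, and an acquire on a
held lock just blocks -- exactly what a bare pthread mutex doesn't give
you.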

non-recursive-ly y'rs - tim

Tim Peters tim@ksr.com
not speaking for Kendall Square Research Corp

*** thread.c Wed May 4 05:36:33 1994
--- ../../Python/thread.c Sat May 7 22:33:50 1994
***************
*** 30,35 ****
--- 30,39 ----
#define USE_DL
#endif

+ #ifdef __ksr__
+ #define _POSIX_THREADS
+ #endif
+
#ifdef HAVE_THREAD_H
#define SOLARIS
#endif
***************
*** 98,103 ****
--- 102,128 ----
#endif /* C_THREADS */
#ifdef _POSIX_THREADS
#include <pthread.h>
+ #include <stdlib.h>
+
+ /* A pthread mutex isn't sufficient to model the Python lock type (at
+ * least not the way KSR did 'em -- haven't dug thru the docs to verify),
+ * because a thread that locks a mutex can't then do a pthread_mutex_lock
+ * on it (to wait for another thread to unlock it).
+ * In any case, pthread mutexes are designed for serializing threads over
+ * short pieces of code, so wouldn't be an appropriate implementation of
+ * Python's locks regardless.
+ * The pthread_lock struct below implements a Python lock as a pthread
+ * mutex and a <condition, mutex> pair. In general, if the mutex can
+ * be acquired instantly, it is, else the pair is used to block the
+ * thread until the mutex is released. 7 May 1994 tim@ksr.com
+ */
+ typedef struct {
+     /* the lock */
+     pthread_mutex_t mutex;
+     /* a <cond, mutex> pair to handle an acquire of a locked mutex */
+     pthread_cond_t cond;
+     pthread_mutex_t cmutex;
+ } pthread_lock;
#endif /* _POSIX_THREADS */

#ifdef __STDC__
***************
*** 301,307 ****
#endif /* __sgi and USE_DL */
#ifdef _POSIX_THREADS
pthread_t th;
! #endif
int success = 0; /* init not needed when SOLARIS and */
/* C_THREADS implemented properly */

--- 326,332 ----
#endif /* __sgi and USE_DL */
#ifdef _POSIX_THREADS
pthread_t th;
! #endif /* _POSIX_THREADS */
int success = 0; /* init not needed when SOLARIS and */
/* C_THREADS implemented properly */

***************
*** 365,372 ****
(void) cthread_fork(func, arg);
#endif /* C_THREADS */
#ifdef _POSIX_THREADS
! pthread_create(&th, NULL, func, arg);
! #endif
return success < 0 ? 0 : 1;
}

--- 390,397 ----
(void) cthread_fork(func, arg);
#endif /* C_THREADS */
#ifdef _POSIX_THREADS
! success = pthread_create(&th, pthread_attr_default, func, arg);
! #endif /* _POSIX_THREADS */
return success < 0 ? 0 : 1;
}

***************
*** 507,512 ****
--- 532,540 ----
struct lock *lock;
extern char *malloc();
#endif
+ #ifdef _POSIX_THREADS
+ pthread_lock *lock;
+ #endif /* _POSIX_THREADS */

dprintf(("allocate_lock called\n"));
if (!initialized)
***************
*** 531,536 ****
--- 559,590 ----
(void) mon_create(&lock->lock_monitor);
(void) cv_create(&lock->lock_condvar, lock->lock_monitor);
#endif /* sun */
+ #ifdef _POSIX_THREADS
+     lock = (pthread_lock *) malloc(sizeof(pthread_lock));
+     {
+         int err = 0;
+         if ( pthread_mutex_init(&lock->mutex,
+                 pthread_mutexattr_default) ) {
+             perror("pthread_mutex_init");
+             err = 1;
+         }
+         if ( pthread_cond_init(&lock->cond,
+                 pthread_condattr_default) ) {
+             perror("pthread_cond_init");
+             err = 1;
+         }
+         if ( pthread_mutex_init(&lock->cmutex,
+                 pthread_mutexattr_default)) {
+             perror("pthread_mutex_init");
+             err = 1;
+         }
+         if (err) {
+             free((void *)lock);
+             lock = 0;
+         }
+     }
+
+ #endif /* _POSIX_THREADS */
dprintf(("allocate_lock() -> %lx\n", (long)lock));
return (type_lock) lock;
}
***************
*** 549,554 ****
--- 603,617 ----
mon_destroy(((struct lock *) lock)->lock_monitor);
free((char *) lock);
#endif /* sun */
+ #ifdef _POSIX_THREADS
+     if ( pthread_mutex_destroy(&((pthread_lock *)lock)->mutex) )
+         perror("pthread_mutex_destroy");
+     if ( pthread_cond_destroy(&((pthread_lock *)lock)->cond) )
+         perror("pthread_cond_destroy");
+     if ( pthread_mutex_destroy(&((pthread_lock *)lock)->cmutex) )
+         perror("pthread_mutex_destroy");
+     free((void *)lock);
+ #endif /* _POSIX_THREADS */
}

int acquire_lock _P2(lock, type_lock lock, waitflag, int waitflag)
***************
*** 589,594 ****
--- 652,685 ----
cv_broadcast(((struct lock *) lock)->lock_condvar);
mon_exit(((struct lock *) lock)->lock_monitor);
#endif /* sun */
+ #ifdef _POSIX_THREADS
+     {
+         pthread_lock *thelock = (pthread_lock *)lock;
+         success = pthread_mutex_trylock( &thelock->mutex );
+         if (success < 0) {
+             perror("pthread_mutex_trylock");
+             success = 0;
+         } else if ( success == 0 && waitflag ) {
+             /* continue trying until we get the lock */
+
+             /* cmutex must be locked by me -- part of the condition
+              * protocol */
+             if ( pthread_mutex_lock( &thelock->cmutex ) )
+                 perror("pthread_mutex_lock");
+             while ( 0 == (success =
+                     pthread_mutex_trylock(&thelock->mutex)) ) {
+                 if ( pthread_cond_wait(&thelock->cond,
+                         &thelock->cmutex) )
+                     perror("pthread_cond_wait");
+             }
+             if (success < 0)
+                 perror("pthread_mutex_trylock");
+             /* now ->mutex & ->cmutex are both locked by me */
+             if ( pthread_mutex_unlock( &thelock->cmutex ) )
+                 perror("pthread_mutex_unlock");
+         }
+     }
+ #endif /* _POSIX_THREADS */
dprintf(("acquire_lock(%lx, %d) -> %d\n", (long)lock, waitflag, success));
return success;
}
***************
*** 610,615 ****
--- 701,728 ----
cv_broadcast(((struct lock *) lock)->lock_condvar);
mon_exit(((struct lock *) lock)->lock_monitor);
#endif /* sun */
+ #ifdef _POSIX_THREADS
+     {
+         pthread_lock *thelock = (pthread_lock *)lock;
+
+         /* tricky: if the release & signal occur between the
+          * pthread_mutex_trylock(&thelock->mutex))
+          * and pthread_cond_wait during the acquire, the acquire
+          * will miss the signal it's waiting for; locking cmutex
+          * around the release prevents that
+          */
+         if (pthread_mutex_lock( &thelock->cmutex ))
+             perror("pthread_mutex_lock");
+         if (pthread_mutex_unlock( &thelock->mutex ))
+             perror("pthread_mutex_unlock");
+         if (pthread_mutex_unlock( &thelock->cmutex ))
+             perror("pthread_mutex_unlock");
+
+         /* wake up someone (anyone, if any) waiting on the lock */
+         if (pthread_cond_signal( &thelock->cond ))
+             perror("pthread_cond_signal");
+     }
+ #endif /* _POSIX_THREADS */
}

/*
***************
*** 626,632 ****
#ifdef sun
type_sema sema = 0;
#endif
!
dprintf(("allocate_sema called\n"));
if (!initialized)
init_thread();
--- 739,747 ----
#ifdef sun
type_sema sema = 0;
#endif
! #ifdef _POSIX_THREADS
! char *sema = 0;
! #endif
dprintf(("allocate_sema called\n"));
if (!initialized)
init_thread();

>>> END OF PATCH
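
A footnote to the comment block near the top of the patch: the property a
Python lock needs, and a bare pthread mutex lacks, is that the thread that
acquires it need not be the thread that releases it, and that acquiring an
already-held lock simply blocks.  A tiny Python-level sketch of that
behavior (using the standard thread module names allocate_lock and
start_new_thread; this is just an illustration, not part of the patch):

import thread, time

lock = thread.allocate_lock()
lock.acquire()                      # main thread takes the lock

def releaser():
    time.sleep(1)
    lock.release()                  # released by a *different* thread

thread.start_new_thread(releaser, ())

# Blocks until releaser() runs.  With the earlier busy-loop patch this
# spun on pthread_mutex_trylock; with this one the waiting thread sleeps
# on the condition variable until release_lock signals it.
lock.acquire()
print 'main thread got the lock back'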