Enqueue sockets in a new object that carries an event type, so that other events
(e.g. socket closes) can also be queued later; track which sockets are currently being handled so that no two threads ever process events on the same socket. This makes running 'NGAMES=50 ./scripts/discon_ok2.sh' crash very infrequently rather than every time, but the race condition in crefmgr remains. Queuing socket closes should help.
parent 54512b9e11
commit 53b3d7c32f
2 changed files with 54 additions and 8 deletions
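
Before the diff itself, here is a minimal, self-contained sketch of the pattern this commit introduces — events queued as (action, socket) pairs plus a set of in-use sockets, so no two threads ever handle events for the same socket at once. The names (EventQueue, next, m_inUse) are hypothetical illustrations, not the relay's actual API:

// Minimal sketch of the typed-queue + in-use-set pattern (hypothetical
// names; not the relay's actual API).
#include <deque>
#include <set>
#include <utility>
#include <pthread.h>

enum QAction { Q_READ, Q_KILL };
typedef std::pair<QAction, int> QueuePr;

class EventQueue {
public:
    EventQueue() {
        pthread_mutex_init( &m_mutex, NULL );
        pthread_cond_init( &m_cond, NULL );
    }

    // Producer side: queue a typed event and wake one worker.
    void enqueue( QAction act, int socket ) {
        pthread_mutex_lock( &m_mutex );
        m_queue.push_back( QueuePr( act, socket ) );
        pthread_cond_signal( &m_cond );
        pthread_mutex_unlock( &m_mutex );
    }

    // Worker side: release the socket we just finished (curSock, or -1 on
    // the first pass), then block until some queued event's socket is free.
    void next( int curSock, QueuePr* out ) {
        pthread_mutex_lock( &m_mutex );
        if ( curSock != -1 ) {
            m_inUse.erase( curSock );
            // Workers waiting on events for curSock can proceed now.
            pthread_cond_broadcast( &m_cond );
        }
        bool found = false;
        while ( !found ) {
            std::deque<QueuePr>::iterator iter;
            for ( iter = m_queue.begin(); iter != m_queue.end(); ++iter ) {
                int socket = iter->second;
                if ( m_inUse.find( socket ) == m_inUse.end() ) {
                    *out = *iter;        // first event whose socket is free
                    m_queue.erase( iter );
                    m_inUse.insert( socket );
                    found = true;
                    break;
                }
            }
            if ( !found ) {
                pthread_cond_wait( &m_cond, &m_mutex );
            }
        }
        pthread_mutex_unlock( &m_mutex );
    }

private:
    std::deque<QueuePr> m_queue;    // pending (action, socket) events
    std::set<int> m_inUse;          // sockets some worker is handling now
    pthread_mutex_t m_mutex;
    pthread_cond_t m_cond;
};

Because events for a busy socket simply stay queued in order, the Q_KILL action is what makes the last line of the commit message possible: once closes go through the same queue, they are serialized with reads on the same socket instead of racing them.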
@@ -146,9 +146,9 @@ XWThreadPool::CloseSocket( int socket )
     /* bool do_interrupt = false; */
     if ( !RemoveSocket( socket ) ) {
         RWWriteLock rwl( &m_activeSocketsRWLock );
-        deque<int>::iterator iter = m_queue.begin();
+        deque<QueuePr>::iterator iter = m_queue.begin();
         while ( iter != m_queue.end() ) {
-            if ( *iter == socket ) {
+            if ( iter->second == socket ) {
                 m_queue.erase( iter );
                 /* do_interrupt = true; */
                 break;
@@ -211,9 +211,12 @@ void*
 XWThreadPool::real_tpool_main()
 {
     logf( XW_LOGINFO, "tpool worker thread starting" );
+    int socket = -1;
     for ( ; ; ) {

         pthread_mutex_lock( &m_queueMutex );
+        release_socket_locked( socket );
+
         while ( !m_timeToDie && m_queue.size() == 0 ) {
             pthread_cond_wait( &m_queueCondVar, &m_queueMutex );
         }
@@ -223,12 +226,16 @@ XWThreadPool::real_tpool_main()
             break;
         }

-        int socket = m_queue.front();
-        m_queue.pop_front();
+        QueuePr pr;
+        if ( grab_elem_locked( socket, &pr ) ) {
+            socket = pr.second;
+        } else {
+            socket = -1;
+        }
         pthread_mutex_unlock( &m_queueMutex );
         logf( XW_LOGINFO, "worker thread got socket %d from queue", socket );

-        if ( get_process_packet( socket ) ) {
+        if ( socket >= 0 && get_process_packet( socket ) ) {
             AddSocket( socket );
         } /* else drop it: error */
     }
@@ -377,9 +384,40 @@ void
 XWThreadPool::enqueue( int socket )
 {
     MutexLock ml( &m_queueMutex );
-    m_queue.push_back( socket );
+    pair<QAction, int> pr(Q_READ, socket);
+    m_queue.push_back( pr );

     logf( XW_LOGINFO, "calling pthread_cond_signal" );
     pthread_cond_signal( &m_queueCondVar );
     /* implicit unlock */
 }
+
+bool
+XWThreadPool::grab_elem_locked( int curSock, QueuePr* prp )
+{
+    bool found = false;
+    deque<QueuePr>::iterator iter;
+    for ( iter = m_queue.begin(); !found && iter != m_queue.end(); ++iter ) {
+        int socket = iter->second;
+        if ( socket == curSock
+             || m_sockets_in_use.end() == m_sockets_in_use.find( socket ) ) {
+            *prp = *iter;
+            m_queue.erase( iter );
+            m_sockets_in_use.insert( socket );
+            found = true;
+        }
+    }
+    logf( XW_LOGINFO, "%s()=>%d", __func__, found );
+    return found;
+}
+
+void
+XWThreadPool::release_socket_locked( int socket )
+{
+    logf( XW_LOGINFO, "%s(%d)", __func__, socket );
+    if ( -1 != socket ) {
+        set<int>::iterator iter = m_sockets_in_use.find( socket );
+        assert( iter != m_sockets_in_use.end() );
+        m_sockets_in_use.erase( iter );
+    }
+}
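Two things worth noting in the new loop: real_tpool_main() hands its previous socket to release_socket_locked() under the queue mutex before grabbing the next event, and grab_elem_locked() skips any event whose socket appears in m_sockets_in_use, so events for a busy socket simply wait their turn. A worker loop for the hypothetical EventQueue sketch above would look roughly like this:

// Hypothetical worker loop for the EventQueue sketch above (illustration
// only): passing back the just-finished socket releases it, making queued
// events for that socket eligible again.
void* worker_main( void* arg )
{
    EventQueue* queue = (EventQueue*)arg;
    int socket = -1;                 // nothing to release on the first pass
    for ( ; ; ) {
        QueuePr pr;
        queue->next( socket, &pr );
        socket = pr.second;
        if ( pr.first == Q_READ ) {
            /* read and process one packet from socket */
        } else {
            /* Q_KILL: close socket and tear down its state */
        }
    }
    return NULL;
}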
@@ -28,6 +28,7 @@

 #include <vector>
 #include <deque>
+#include <set>

 using namespace std;

@@ -48,13 +49,19 @@ class XWThreadPool {
     /* remove from tpool altogether, and close */
     void CloseSocket( int socket );

-    void Poll();
+    void EnqueueKill( int socket );

 private:
+    typedef enum { Q_READ, Q_KILL } QAction;
+    typedef pair<QAction, int> QueuePr;
+
     /* Remove from set being listened on */
     bool RemoveSocket( int socket );

     void enqueue( int socket );
+    void release_socket_locked( int socket );
+    bool grab_elem_locked( int curSock, QueuePr* qpp );
+
     bool get_process_packet( int socket );
     void interrupt_poll();

@@ -69,7 +76,8 @@ class XWThreadPool {
     pthread_rwlock_t m_activeSocketsRWLock;

     /* Sockets waiting for a thread to read 'em */
-    deque<int> m_queue;
+    deque<QueuePr> m_queue;
+    set<int> m_sockets_in_use;
     pthread_mutex_t m_queueMutex;
     pthread_cond_t m_queueCondVar;