mirror of git://xwords.git.sourceforge.net/gitroot/xwords/xwords
synced 2025-01-24 07:58:34 +01:00
Don't block UI thread until service instances released
I thought I had to stop using a service instance before returning from its onDestroy(), but that made the UI incredibly laggy AND appears not to be necessary: at least in a bit of testing, things still work.
parent 311768697e
commit 0a4b8549cf
1 changed file with 94 additions and 163 deletions
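In outline: before this change, the long-lived UDP threads borrowed the current RelayService through an AutoCloseable ServiceWrapper, and unsetService(), called from onDestroy(), blocked the UI thread until every borrow had been returned; after it, unsetService() just clears the holder and returns. A condensed before/after of that one method, using the member names from the diff below:

    // Before: block until the use count maintained by ServiceWrapper.close()
    // drops to zero.
    void unsetService() {
        synchronized ( mServiceHolder ) {
            mServiceHolder[0] = null;
            while ( mHaveServiceCount.get() > 0 ) {
                try {
                    mServiceHolder.wait();
                } catch ( InterruptedException ie ) {
                    // loop again; close() calls notifyAll() at zero
                }
            }
        }
    }

    // After: clear the slot and return at once; threads that already hold
    // the instance may keep using it briefly after onDestroy() returns.
    void unsetService() {
        synchronized ( mServiceHolder ) {
            mServiceHolder[0] = null;
        }
    }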
@@ -1003,88 +1003,28 @@ public class RelayService extends JobIntentService
         private Thread m_UDPReadThread;
         private Thread m_UDPWriteThread;
         private RelayService[] mServiceHolder = {null};
-        private AtomicInteger mHaveServiceCount = new AtomicInteger();
-
-        // Here's the problem: In the Oreo world, RelayService instances come
-        // and go, typically lasting only long enough to dispatch a single
-        // onHandleWork() call. So the UDPThreads instance is now a static
-        // singleton that lasts a really long time, across the lives of
-        // hundreds of RelayService instances. It and its threads, however,
-        // have to make calls on RelayService instances. So I need a way to
-        // block them when there's no instance available AND to block
-        // onDestroy() returning until all references to the current instance
-        // have been forgotten.
-        //
-        // The solution is to set and clear the instance through
-        // [un]setService(), and to wrap the code that needs references in an
-        // AutoCloseable that, when instantiated, blocks while the instance
-        // is null. Thus when setService() is called with a new
-        // instance, all the requests for an instance are unblocked and each
-        // of them increments a counter. When the AutoCloseable's close() is
-        // called, the counter is decremented. unsetService() waits until that
-        // counter is 0 (and so the close() method calls notifyAll() when the
-        // counter's dropped to 0.)
-        //
-        // It works like this: the threads run. When they need a reference to
-        // the current RelayService they block until there's one
-        // available. They're able to get the reference as long as it's
-        // non-null, but once it's null they start to block again. Once the
-        // last one that didn't block finishes with its instance,
-        // unsetService() returns (and so onDestroy() can as well). This
-        // clears it so the OS can create another RelayService instance that
-        // will eventually get passed to setService() again.
-
-        private class ServiceWrapper implements AutoCloseable {
-            private final RelayService mService;
-            ServiceWrapper( RelayService service ) { mService = service; }
-
-            RelayService get() { return mService; }
-
-            @Override
-            public void close()
-            {
-                synchronized ( mServiceHolder ) {
-                    int newVal = mHaveServiceCount.decrementAndGet();
-                    Log.d( TAG, "close(): count now %d", newVal );
-                    if ( 0 == newVal ) {
-                        mServiceHolder.notifyAll();
-                    }
-                }
-            }
-        }
-
         void setService( RelayService service )
         {
             synchronized ( mServiceHolder ) {
                 mServiceHolder[0] = service;
+                // unblock waiters for non-null Service
                 mServiceHolder.notifyAll();
             }
         }

-        // When service is cleared, DO NOT RETURN until all threads using the
-        // old value of the service have relinquished it. This is called from
-        // onDestroy(), and once onDestroy() has returned nobody should
-        // reference its RelayService instance again.
+        // It will be a few milliseconds before all threads using the current
+        // Service instance are done. Blocking the UI thread here until that
+        // happened made for a laggy UI, so I'm going to see if we can
+        // continue to use the instance for a short while after returning.
         void unsetService()
         {
-            Log.d( TAG, "unsetService()" );
             synchronized ( mServiceHolder ) {
                 mServiceHolder[0] = null;
-                // Now block until the used count drops to 0
-                while ( mHaveServiceCount.get() > 0 ) {
-                    try {
-                        mServiceHolder.wait();
-                    } catch (InterruptedException ie) {
-                        Log.e( TAG, "wait() threw: %s", ie.getMessage() );
-                        // Assert.assertFalse( BuildConfig.DEBUG );
-                    }
-                }
             }
-            Log.d( TAG, "unsetService() DONE" );
         }

-        // called by various threads that need to run
-        private ServiceWrapper acquireWrapper()
+        private RelayService getService()
         {
             synchronized ( mServiceHolder ) {
                 while ( null == mServiceHolder[0] ) {
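The deleted comment above describes the acquire/release protocol in prose; here is a minimal, self-contained sketch of the same mechanics for readers who want to run it. All names here (HolderDemo, Svc, Lease, sUseCount) are invented for the sketch, not the project's; only the holder-array/counter/notifyAll mechanics mirror the deleted code, and releaseService() reproduces the blocking behavior this commit removes.

    import java.util.concurrent.atomic.AtomicInteger;

    public class HolderDemo {
        static class Svc {
            void doWork() { System.out.println( "work" ); }
        }

        static final Svc[] sHolder = { null };
        static final AtomicInteger sUseCount = new AtomicInteger();

        // Counterpart of ServiceWrapper: closing a lease decrements the use
        // count and wakes releaseService() when it hits zero.
        static class Lease implements AutoCloseable {
            private final Svc mSvc;
            Lease( Svc svc ) { mSvc = svc; }
            Svc get() { return mSvc; }
            @Override
            public void close() {
                synchronized ( sHolder ) {
                    if ( 0 == sUseCount.decrementAndGet() ) {
                        sHolder.notifyAll();
                    }
                }
            }
        }

        // Counterpart of setService(): publish the instance, wake acquirers.
        static void setService( Svc svc ) {
            synchronized ( sHolder ) {
                sHolder[0] = svc;
                sHolder.notifyAll();
            }
        }

        // The behavior this commit removes: block until no leases remain.
        static void releaseService() throws InterruptedException {
            synchronized ( sHolder ) {
                sHolder[0] = null;
                while ( sUseCount.get() > 0 ) {
                    sHolder.wait();
                }
            }
        }

        // Counterpart of acquireWrapper(): block while the holder is empty.
        static Lease acquire() throws InterruptedException {
            synchronized ( sHolder ) {
                while ( null == sHolder[0] ) {
                    sHolder.wait();
                }
                sUseCount.incrementAndGet();
                return new Lease( sHolder[0] );
            }
        }

        public static void main( String[] args ) throws Exception {
            Thread worker = new Thread( () -> {
                try ( Lease lease = acquire() ) { // blocks until setService()
                    lease.get().doWork();
                } catch ( InterruptedException ie ) {
                    Thread.currentThread().interrupt();
                }
            } );
            worker.start();
            setService( new Svc() );
            worker.join();    // lease closed; use count back to zero
            releaseService(); // returns only once the count is zero
            System.out.println( "released" );
        }
    }

The property the old code paid for is visible in main(): releaseService() cannot return while any Lease is open. That wait, performed on the UI thread from onDestroy(), is what the commit message blames for the lag.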
@@ -1092,13 +1032,10 @@ public class RelayService extends JobIntentService
                         mServiceHolder.wait();
                     } catch (InterruptedException ie) {
                         Log.e( TAG, "wait() threw: %s", ie.getMessage() );
+                        // InterruptedException is how we get unblocked!
                         // Assert.assertFalse( BuildConfig.DEBUG );
                     }
                 }
-                int newVal = mHaveServiceCount.incrementAndGet();
-                Log.d( TAG, "acquireWrapper(): count now %d", newVal );
-                return new ServiceWrapper( mServiceHolder[0] );
+                return mServiceHolder[0];
             }
         }

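The hunks that follow are the mechanical caller-side half of the change: every scoped borrow becomes a plain fetch. The shape is the same everywhere, e.g. in the UDP read loop:

    // before
    try ( ServiceWrapper wrapper = acquireWrapper() ) {
        wrapper.get().resetExitTimer();
        wrapper.get().gotPacket( packet );
    }

    // after
    final RelayService service = getService();
    service.resetExitTimer();
    service.gotPacket( packet );

The trade-off: getService() can now hand back an instance whose onDestroy() has already returned, and nothing blocks until callers are done with it. The commit message's bet is that briefly using such an instance is harmless in practice ("appears not to be necessary. At least in a bit of testing things still work").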
@@ -1117,10 +1054,9 @@ public class RelayService extends JobIntentService
                         new DatagramPacket( buf, buf.length );
                     try {
                         m_UDPSocket.receive( packet );
-                        try (ServiceWrapper wrapper = acquireWrapper() ) {
-                            wrapper.get().resetExitTimer();
-                            wrapper.get().gotPacket( packet );
-                        }
+                        final RelayService service = getService();
+                        service.resetExitTimer();
+                        service.gotPacket( packet );
                     } catch ( java.io.InterruptedIOException iioe ) {
                         // DbgUtils.logf( "FYI: udp receive timeout" );
                     } catch( java.io.IOException ioe ) {
@@ -1148,10 +1084,10 @@ public class RelayService extends JobIntentService
                 if ( null == m_UDPSocket ) {
                     int port;
                     String host;
-                    try ( ServiceWrapper wrapper = acquireWrapper() ) {
-                        port = XWPrefs.getDefaultRelayPort( wrapper.get() );
-                        host = XWPrefs.getDefaultRelayHost( wrapper.get() );
-                    }
+                    final RelayService service = getService();
+                    port = XWPrefs.getDefaultRelayPort( service );
+                    host = XWPrefs.getDefaultRelayHost( service );
+
                     try {
                         m_UDPSocket = new DatagramSocket();
                         m_UDPSocket.setSoTimeout(30 * 1000); // timeout so we can log
@@ -1206,9 +1142,8 @@ public class RelayService extends JobIntentService
                 sendViaWeb( dataListWeb );
                 sendViaUDP( dataListUDP );

-                try ( ServiceWrapper wrapper = acquireWrapper() ) {
-                    wrapper.get().resetExitTimer();
-                }
+                getService().resetExitTimer();
                 runUDPAckTimer();

                 ConnStatusHandler.showSuccessOut();
@@ -1235,52 +1170,50 @@ public class RelayService extends JobIntentService
         Log.d( TAG, "sendViaWeb(): sending %d at once", packets.size() );
         int sentLen = 0;
         if ( packets.size() > 0 ) {
-            try ( ServiceWrapper wrapper = acquireWrapper() ) {
-                RelayService service = wrapper.get();
-                HttpURLConnection conn = NetUtils
-                    .makeHttpRelayConn( service, "post" );
-                if ( null == conn ) {
-                    Log.e( TAG, "sendViaWeb(): null conn for POST" );
-                } else {
-                    try {
-                        JSONArray dataArray = new JSONArray();
-                        for ( PacketData packet : packets ) {
-                            Assert.assertFalse( packet instanceof EOQPacketData );
-                            byte[] datum = packet.assemble();
-                            dataArray.put( Utils.base64Encode(datum) );
-                            sentLen += datum.length;
-                        }
-                        JSONObject params = new JSONObject();
-                        params.put( "data", dataArray );
-
-                        String result = NetUtils.runConn( conn, params );
-                        boolean succeeded = null != result;
-                        if ( succeeded ) {
-                            Log.d( TAG, "sendViaWeb(): POST(%s) => %s", params, result );
-                            JSONObject resultObj = new JSONObject( result );
-                            JSONArray resData = resultObj.getJSONArray( "data" );
-                            int nReplies = resData.length();
-                            // Log.d( TAG, "sendViaWeb(): got %d replies", nReplies );
-
-                            service
-                                .noteSent( packets, false ); // before we process the acks below :-)
-
-                            for ( int ii = 0; ii < nReplies; ++ii ) {
-                                byte[] datum = Utils.base64Decode( resData.getString( ii ) );
-                                // PENDING: skip ack or not
-                                service.gotPacket( datum, false, false );
-                            }
-                        } else {
-                            Log.e( TAG, "sendViaWeb(): failed result for POST" );
-                        }
-
-                        ConnStatusHandler.updateStatus( service, null,
-                                                        CommsConnType.COMMS_CONN_RELAY,
-                                                        succeeded );
-                    } catch ( JSONException ex ) {
-                        Assert.assertFalse( BuildConfig.DEBUG );
-                    }
+            final RelayService service = getService();
+            HttpURLConnection conn = NetUtils
+                .makeHttpRelayConn( service, "post" );
+            if ( null == conn ) {
+                Log.e( TAG, "sendViaWeb(): null conn for POST" );
+            } else {
+                try {
+                    JSONArray dataArray = new JSONArray();
+                    for ( PacketData packet : packets ) {
+                        Assert.assertFalse( packet instanceof EOQPacketData );
+                        byte[] datum = packet.assemble();
+                        dataArray.put( Utils.base64Encode(datum) );
+                        sentLen += datum.length;
+                    }
+                    JSONObject params = new JSONObject();
+                    params.put( "data", dataArray );
+
+                    String result = NetUtils.runConn( conn, params );
+                    boolean succeeded = null != result;
+                    if ( succeeded ) {
+                        Log.d( TAG, "sendViaWeb(): POST(%s) => %s", params, result );
+                        JSONObject resultObj = new JSONObject( result );
+                        JSONArray resData = resultObj.getJSONArray( "data" );
+                        int nReplies = resData.length();
+                        // Log.d( TAG, "sendViaWeb(): got %d replies", nReplies );
+
+                        service
+                            .noteSent( packets, false ); // before we process the acks below :-)
+
+                        for ( int ii = 0; ii < nReplies; ++ii ) {
+                            byte[] datum = Utils.base64Decode( resData.getString( ii ) );
+                            // PENDING: skip ack or not
+                            service.gotPacket( datum, false, false );
+                        }
+                    } else {
+                        Log.e( TAG, "sendViaWeb(): failed result for POST" );
+                    }
+
+                    ConnStatusHandler.updateStatus( service, null,
+                                                    CommsConnType.COMMS_CONN_RELAY,
+                                                    succeeded );
+                } catch ( JSONException ex ) {
+                    Assert.assertFalse( BuildConfig.DEBUG );
                 }
             }
         }
@@ -1292,47 +1225,45 @@ public class RelayService extends JobIntentService
         int sentLen = 0;

         if ( packets.size() > 0 ) {
-            try ( ServiceWrapper wrapper = acquireWrapper() ) {
-                RelayService service = wrapper.get();
-                service.noteSent( packets, true );
-                for ( PacketData packet : packets ) {
-                    boolean getOut = true;
-                    byte[] data = packet.assemble();
-                    try {
-                        DatagramPacket udpPacket = new DatagramPacket( data, data.length );
-                        m_UDPSocket.send( udpPacket );
-
-                        sentLen += udpPacket.getLength();
-                        // packet.setSentMS( nowMS );
-                        getOut = false;
-                    } catch ( java.net.SocketException se ) {
-                        Log.ex( TAG, se );
-                        Log.i( TAG, "Restarting threads to force new socket" );
-                        ConnStatusHandler.updateStatusOut( service, null,
-                                                           CommsConnType.COMMS_CONN_RELAY,
-                                                           true );
-
-                        service.m_handler.post( new Runnable() {
-                                public void run() {
-                                    Assert.assertFalse( BuildConfig.DEBUG );
-                                    // stopUDPThreadsIf();
-                                }
-                            } );
-                        break;
-                    } catch ( java.io.IOException ioe ) {
-                        Log.ex( TAG, ioe );
-                    } catch ( NullPointerException npe ) {
-                        Log.w( TAG, "network problem; dropping packet" );
-                    }
-                    if ( getOut ) {
-                        break;
-                    }
-                }
-
-                ConnStatusHandler.updateStatus( service, null,
-                                                CommsConnType.COMMS_CONN_RELAY,
-                                                sentLen > 0 );
-            }
+            final RelayService service = getService();
+            service.noteSent( packets, true );
+            for ( PacketData packet : packets ) {
+                boolean getOut = true;
+                byte[] data = packet.assemble();
+                try {
+                    DatagramPacket udpPacket = new DatagramPacket( data, data.length );
+                    m_UDPSocket.send( udpPacket );
+
+                    sentLen += udpPacket.getLength();
+                    // packet.setSentMS( nowMS );
+                    getOut = false;
+                } catch ( java.net.SocketException se ) {
+                    Log.ex( TAG, se );
+                    Log.i( TAG, "Restarting threads to force new socket" );
+                    ConnStatusHandler.updateStatusOut( service, null,
+                                                       CommsConnType.COMMS_CONN_RELAY,
+                                                       true );
+
+                    service.m_handler.post( new Runnable() {
+                            public void run() {
+                                Assert.assertFalse( BuildConfig.DEBUG );
+                                // stopUDPThreadsIf();
+                            }
+                        } );
+                    break;
+                } catch ( java.io.IOException ioe ) {
+                    Log.ex( TAG, ioe );
+                } catch ( NullPointerException npe ) {
+                    Log.w( TAG, "network problem; dropping packet" );
+                }
+                if ( getOut ) {
+                    break;
+                }
+            }
+
+            ConnStatusHandler.updateStatus( service, null,
+                                            CommsConnType.COMMS_CONN_RELAY,
+                                            sentLen > 0 );
         }

         return sentLen;