void *context2 = closure.mContext2;
int parameter1 = closure.mParameter1;
switch (kind) {
- case CLOSURE_KIND_PPI:
+ case CLOSURE_KIND_PPI:
{
- ClosureHandler_ppi handler_ppi;
- handler_ppi = closure.mHandler.mHandler_ppi;
+ ClosureHandler_ppi handler_ppi = closure.mHandler.mHandler_ppi;
assert(NULL != handler_ppi);
(*handler_ppi)(context1, context2, parameter1);
}
break;
- case CLOSURE_KIND_PPII:
+ case CLOSURE_KIND_PPII:
{
- ClosureHandler_ppii handler_ppii;
- handler_ppii = closure.mHandler.mHandler_ppii;
+ ClosureHandler_ppii handler_ppii = closure.mHandler.mHandler_ppii;
assert(NULL != handler_ppii);
int parameter2 = closure.mParameter2;
(*handler_ppii)(context1, context2, parameter1, parameter2);
}
break;
- default:
+ // New kind: handler takes (void *, int, int, void *, void *) in that argument order,
+ // so the two int parameters are passed between context1 and context2/context3.
+ case CLOSURE_KIND_PIIPP:
+ {
+ ClosureHandler_piipp handler_piipp = closure.mHandler.mHandler_piipp;
+ assert(NULL != handler_piipp);
+ int parameter2 = closure.mParameter2;
+ void *context3 = closure.mContext3;
+ (*handler_piipp)(context1, parameter1, parameter2, context2, context3);
+ }
+ break;
+ default:
SL_LOGE("Unexpected callback kind %d", kind);
assert(false);
break;
// Enqueue a closure to be executed later by a worker thread.
// Note that this raw interface requires an explicit "kind" and full parameter list.
// There are convenience methods below that make this easier to use.
-SLresult ThreadPool_add(ThreadPool *tp, ClosureKind kind, ClosureHandler_ppii handler,
- void *context1, void *context2, int parameter1, int parameter2)
+SLresult ThreadPool_add(ThreadPool *tp, ClosureKind kind, ClosureHandler_generic handler,
+ void *context1, void *context2, void *context3, int parameter1, int parameter2)
{
assert(NULL != tp);
assert(NULL != handler);
return SL_RESULT_RESOURCE_ERROR;
}
closure->mKind = kind;
- // note this will automagically assign to mHandler_ppi also
- closure->mHandler.mHandler_ppii = handler;
+ // Store the handler into the union member that matches its true signature; the typed
+ // wrappers below cast from that type to ClosureHandler_generic, so casting back here
+ // restores the original function-pointer type before the worker calls through it.
+ switch(kind) {
+ case CLOSURE_KIND_PPI:
+ closure->mHandler.mHandler_ppi = (ClosureHandler_ppi)handler;
+ break;
+ case CLOSURE_KIND_PPII:
+ closure->mHandler.mHandler_ppii = (ClosureHandler_ppii)handler;
+ break;
+ case CLOSURE_KIND_PIIPP:
+ closure->mHandler.mHandler_piipp = (ClosureHandler_piipp)handler;
+ break;
+ default:
+ SL_LOGE("ThreadPool_add() invalid closure kind %d", kind);
+ assert(false);
+ // NOTE(review): with NDEBUG, an invalid kind leaves mHandler unset yet the closure
+ // is still enqueued; consider failing the add — TODO confirm the slot-release path
+ }
closure->mContext1 = context1;
closure->mContext2 = context2;
+ closure->mContext3 = context3;
closure->mParameter1 = parameter1;
closure->mParameter2 = parameter2;
int ok;
void *context1, void *context2, int parameter1)
{
// function pointers are the same size so this is a safe cast
- return ThreadPool_add(tp, CLOSURE_KIND_PPI, (ClosureHandler_ppii) handler,
- context1, context2, parameter1, 0);
+ return ThreadPool_add(tp, CLOSURE_KIND_PPI, (ClosureHandler_generic) handler,
+ context1, context2, NULL, parameter1, 0);
}
SLresult ThreadPool_add_ppii(ThreadPool *tp, ClosureHandler_ppii handler,
void *context1, void *context2, int parameter1, int parameter2)
{
- // note that no cast is needed for handler because it is already the right type
- return ThreadPool_add(tp, CLOSURE_KIND_PPII, handler, context1, context2, parameter1,
- parameter2);
+ // function pointers are the same size so this is a safe cast
+ return ThreadPool_add(tp, CLOSURE_KIND_PPII, (ClosureHandler_generic) handler,
+ context1, context2, NULL, parameter1, parameter2);
+}
+
+// Convenience wrapper for CLOSURE_KIND_PIIPP. The wrapper's parameter order mirrors the
+// handler signature (ptr, int, int, ptr, ptr), while ThreadPool_add takes all contexts
+// before all int parameters; the reordering below maps cntxt2/cntxt3 and param1/param2.
+SLresult ThreadPool_add_piipp(ThreadPool *tp, ClosureHandler_piipp handler,
+ void *cntxt1, int param1, int param2, void *cntxt2, void *cntxt3)
+{
+ // function pointers are the same size so this is a safe cast
+ return ThreadPool_add(tp, CLOSURE_KIND_PIIPP, (ClosureHandler_generic) handler,
+ cntxt1, cntxt2, cntxt3, param1, param2);
}
typedef enum {
CLOSURE_KIND_PPI, // void *, void *, int
- CLOSURE_KIND_PPII // void *, void *, int, int
+ CLOSURE_KIND_PPII, // void *, void *, int, int
+ CLOSURE_KIND_PIIPP // void *, int, int, void *, void *
} ClosureKind;
/** Closure handlers */
-typedef void (*ClosureHandler_ppi)(void *context1, void *context2, int parameter1);
-typedef void (*ClosureHandler_ppii)(void *context1, void *context2, int parameter1, int parameter2);
+// ClosureHandler_generic is a transport-only type: the typed wrappers cast their handler
+// to it when calling ThreadPool_add(), which immediately casts back to the kind-specific
+// type; the worker always calls through the union member selected by mKind.
+typedef void (*ClosureHandler_generic)(void *p1, void *p2, void *p3, int i1, int i2);
+typedef void (*ClosureHandler_ppi) (void *p1, void *p2, int i1);
+typedef void (*ClosureHandler_ppii) (void *p1, void *p2, int i1, int i2);
+typedef void (*ClosureHandler_piipp) (void *p1, int i1, int i2, void *p2, void *p3);
/** \brief Closure represents a deferred computation */
union {
ClosureHandler_ppi mHandler_ppi;
ClosureHandler_ppii mHandler_ppii;
+ ClosureHandler_piipp mHandler_piipp;
} mHandler;
ClosureKind mKind;
void *mContext1;
void *mContext2;
+ void *mContext3; // read by the worker only for CLOSURE_KIND_PIIPP
int mParameter1;
int mParameter2;
} Closure;
extern SLresult ThreadPool_init(ThreadPool *tp, unsigned maxClosures, unsigned maxThreads);
extern void ThreadPool_deinit(ThreadPool *tp);
+// Raw enqueue interface; prefer the typed ThreadPool_add_* wrappers below.
extern SLresult ThreadPool_add(ThreadPool *tp, ClosureKind kind,
- void (*handler)(void *, void *, int, int), void *context1,
- void *context2, int parameter1, int parameter2);
+ ClosureHandler_generic,
+ void *cntxt1, void *cntxt2, void *cntxt3, int param1, int param2);
extern Closure *ThreadPool_remove(ThreadPool *tp);
extern SLresult ThreadPool_add_ppi(ThreadPool *tp, ClosureHandler_ppi handler,
- void *context1, void *context2, int parameter1);
+ void *cntxt1, void *cntxt2, int param1);
extern SLresult ThreadPool_add_ppii(ThreadPool *tp, ClosureHandler_ppii handler,
- void *context1, void *context2, int parameter1, int parameter2);
+ void *cntxt1, void *cntxt2, int param1, int param2);
+extern SLresult ThreadPool_add_piipp(ThreadPool *tp, ClosureHandler_piipp handler,
+ void *cntxt1, int param1, int param2, void *cntxt2, void *cntxt3);
object_unlock_exclusive(&mp->mObject);
- // notify (outside of lock) that the stream information has been updated
+ // enqueue notification (outside of lock) that the stream information has been updated
if ((NULL != callback) && (index >= 0)) {
+#ifdef XA_SYNCHRONOUS_STREAMCBEVENT_PROPERTYCHANGE
(*callback)(&mp->mStreamInfo.mItf, XA_STREAMCBEVENT_PROPERTYCHANGE /*eventId*/,
1 /*streamIndex, only one stream supported here, 0 is reserved*/,
NULL /*pEventData, always NULL in OpenMAX AL 1.0.1*/,
callbackPContext /*pContext*/);
+#else
+ SLresult res = EnqueueAsyncCallback_piipp(mp, callback,
+ /*p1*/ &mp->mStreamInfo.mItf,
+ /*i1*/ XA_STREAMCBEVENT_PROPERTYCHANGE /*eventId*/,
+ /*i2*/ 1 /*streamIndex, only one stream supported here, 0 is reserved*/,
+ /*p2*/ NULL /*pEventData, always NULL in OpenMAX AL 1.0.1*/,
+ /*p3*/ callbackPContext /*pContext*/);
+ // log (rather than silently drop) a failed enqueue, matching the HEADATEND path
+ LOGW_IF(SL_RESULT_SUCCESS != res,
+ "Callback %p(%p, XA_STREAMCBEVENT_PROPERTYCHANGE, 1, NULL, %p) dropped",
+ callback, &mp->mStreamInfo.mItf, callbackPContext);
+#endif
}
break;
}
}
}
object_unlock_exclusive(&mp->mObject);
- // callback with no lock held
+ // enqueue callback with no lock held
if (NULL != playCallback) {
+// When XA_SYNCHRONOUS_PLAYEVENT_HEADATEND is defined the callback runs in-line on this
+// thread; otherwise it is deferred to the thread pool and a failed enqueue is logged.
+#ifdef XA_SYNCHRONOUS_PLAYEVENT_HEADATEND
(*playCallback)(&mp->mPlay.mItf, playContext, XA_PLAYEVENT_HEADATEND);
+#else
+ SLresult res = EnqueueAsyncCallback_ppi(mp, playCallback, &mp->mPlay.mItf, playContext,
+ XA_PLAYEVENT_HEADATEND);
+ LOGW_IF(SL_RESULT_SUCCESS != res,
+ "Callback %p(%p, %p, XA_PLAYEVENT_HEADATEND) dropped", playCallback,
+ &mp->mPlay.mItf, playContext);
+#endif
}
break;
}
#define EnqueueAsyncCallback_ppii(object, handler, p1, p2, i1, i2) \
ThreadPool_add_ppii(&(object)->mObject.mEngine->mThreadPool, \
(ClosureHandler_ppii) (handler), (p1), (p2), (i1), (i2))
+// Defer a (void *, int, int, void *, void *) callback to the owning engine's thread
+// pool; macro argument order matches the ClosureHandler_piipp handler signature.
+#define EnqueueAsyncCallback_piipp(object, handler, p1, i1, i2, p2, p3) \
+ ThreadPool_add_piipp(&(object)->mObject.mEngine->mThreadPool, \
+ (ClosureHandler_piipp) (handler), (p1), (i1), (i2), (p2), (p3))