iocore/eventsystem/I_Lock.h (38 changes: 0 additions & 38 deletions)

@@ -70,42 +70,8 @@
 */
 #define MUTEX_TRY_LOCK(_l, _m, _t) MutexTryLock _l(MakeSourceLocation(), (char *)nullptr, _m, _t)

-/**
-  Attempts to acquire the lock to the ProxyMutex.
-
-  This macro performs up to the specified number of attempts to
-  acquire the lock on the ProxyMutex object. It does so by running
-  a busy loop (busy wait) '_sc' times. You should use it with care
-  since it blocks the thread during that time and wastes CPU time.
-
-  @param _l Arbitrary name for the lock to use in this call (lock variable)
-  @param _m A pointer to (or address of) a ProxyMutex object
-  @param _t The current EThread executing your code.
-  @param _sc The number of attempts or spin count. It must be a positive value.
-
-*/
-#define MUTEX_TRY_LOCK_SPIN(_l, _m, _t, _sc) MutexTryLock _l(MakeSourceLocation(), (char *)nullptr, _m, _t, _sc)
-
-/**
-  Attempts to acquire the lock to the ProxyMutex.
-
-  This macro attempts to acquire the lock to the specified ProxyMutex
-  object in a non-blocking manner. After using the macro you can
-  see if it was successful by comparing the lock variable with true
-  or false (the variable name passed in the _l parameter).
-
-  @param _l Arbitrary name for the lock to use in this call (lock variable)
-  @param _m A pointer to (or address of) a ProxyMutex object
-  @param _t The current EThread executing your code.
-  @param _c Continuation whose mutex will be attempted to lock.
-
-*/
-
-#define MUTEX_TRY_LOCK_FOR(_l, _m, _t, _c) MutexTryLock _l(MakeSourceLocation(), nullptr, _m, _t)
 #else // DEBUG
 #define MUTEX_TRY_LOCK(_l, _m, _t) MutexTryLock _l(_m, _t)
-#define MUTEX_TRY_LOCK_SPIN(_l, _m, _t, _sc) MutexTryLock _l(_m, _t, _sc)
-#define MUTEX_TRY_LOCK_FOR(_l, _m, _t, _c) MutexTryLock _l(_m, _t)
 #endif // DEBUG

 /**
@@ -126,12 +92,8 @@
 // DEPRECATED DEPRECATED DEPRECATED
 #ifdef DEBUG
 #define MUTEX_TAKE_TRY_LOCK(_m, _t) Mutex_trylock(MakeSourceLocation(), (char *)nullptr, _m, _t)
-#define MUTEX_TAKE_TRY_LOCK_FOR(_m, _t, _c) Mutex_trylock(MakeSourceLocation(), (char *)nullptr, _m, _t)
-#define MUTEX_TAKE_TRY_LOCK_FOR_SPIN(_m, _t, _c, _sc) Mutex_trylock_spin(MakeSourceLocation(), nullptr, _m, _t, _sc)
 #else
 #define MUTEX_TAKE_TRY_LOCK(_m, _t) Mutex_trylock(_m, _t)
-#define MUTEX_TAKE_TRY_LOCK_FOR(_m, _t, _c) Mutex_trylock(_m, _t)
-#define MUTEX_TAKE_TRY_LOCK_FOR_SPIN(_m, _t, _c, _sc) Mutex_trylock_spin(_m, _t, _sc)
 #endif

 #ifdef DEBUG
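Note: the removed MUTEX_TRY_LOCK_FOR variant accepted a continuation argument (_c) that its expansion never used, so every call site below can switch to plain MUTEX_TRY_LOCK by dropping that argument with no change in behavior. For reference, a minimal sketch of the try-lock idiom those call sites keep using, assuming the ATS event-system headers; RetryExample, do_protected_work(), and the 10 ms retry delay are hypothetical, while MUTEX_TRY_LOCK, is_locked(), schedule_in(), and the reschedule-on-contention pattern all appear in the hunks of this change.

// Sketch only: RetryExample, do_protected_work(), and the retry delay are invented
// for illustration; the macro and the is_locked()/reschedule pattern mirror the
// call sites touched by this pull request.
int
RetryExample::handle_event(int event, Event *e)
{
  EThread *t = e ? e->ethread : this_ethread();

  MUTEX_TRY_LOCK(lock, action.mutex, t); // non-blocking attempt; no continuation argument needed
  if (!lock.is_locked()) {
    // Contended: back off and let the event system call us again later.
    t->schedule_in(this, HRTIME_MSECONDS(10));
    return EVENT_CONT;
  }

  // The mutex stays held until 'lock' leaves scope.
  do_protected_work();
  return EVENT_DONE;
}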
iocore/eventsystem/UnixEThread.cc (2 changes: 1 addition & 1 deletion)

@@ -118,7 +118,7 @@ void
 EThread::process_event(Event *e, int calling_code)
 {
   ink_assert((!e->in_the_prot_queue && !e->in_the_priority_queue));
-  MUTEX_TRY_LOCK_FOR(lock, e->mutex, this, e->continuation);
+  MUTEX_TRY_LOCK(lock, e->mutex, this);
   if (!lock.is_locked()) {
     e->timeout_at = cur_time + DELAY_FOR_RETRY;
     EventQueueExternal.enqueue_local(e);
iocore/hostdb/HostDB.cc (14 changes: 7 additions & 7 deletions)

@@ -1204,7 +1204,7 @@ HostDBContinuation::dnsPendingEvent(int event, Event *e)
   }
   if (event == EVENT_INTERVAL) {
     // we timed out, return a failure to the user
-    MUTEX_TRY_LOCK_FOR(lock, action.mutex, ((Event *)e)->ethread, action.continuation);
+    MUTEX_TRY_LOCK(lock, action.mutex, ((Event *)e)->ethread);
     if (!lock.is_locked()) {
       timeout = eventProcessor.schedule_in(this, HOST_DB_RETRY_PERIOD);
       return EVENT_CONT;
@@ -1259,7 +1259,7 @@ HostDBContinuation::dnsEvent(int event, HostEnt *e)
     hostdb_cont_free(this);
     return EVENT_DONE;
   }
-  MUTEX_TRY_LOCK_FOR(lock, action.mutex, thread, action.continuation);
+  MUTEX_TRY_LOCK(lock, action.mutex, thread);
   if (!lock.is_locked()) {
     timeout = thread->schedule_in(this, HOST_DB_RETRY_PERIOD);
     return EVENT_CONT;
@@ -1494,12 +1494,12 @@ HostDBContinuation::dnsEvent(int event, HostEnt *e)
   // Since reply_to_cont will call the hanlder on the action.continuation, it is important that we hold
   // that mutex.
   bool need_to_reschedule = true;
-  MUTEX_TRY_LOCK_FOR(lock, action.mutex, thread, action.continuation);
+  MUTEX_TRY_LOCK(lock, action.mutex, thread);
   if (lock.is_locked()) {
     need_to_reschedule = !action.cancelled;
     if (!action.cancelled) {
       if (action.continuation->mutex) {
-        MUTEX_TRY_LOCK_FOR(lock2, action.continuation->mutex, thread, action.continuation);
+        MUTEX_TRY_LOCK(lock2, action.continuation->mutex, thread);
         if (lock2.is_locked()) {
           reply_to_cont(action.continuation, r, is_srv());
           need_to_reschedule = false;
@@ -1534,7 +1534,7 @@ HostDBContinuation::iterateEvent(int event, Event *e)
   ink_assert(!link.prev && !link.next);
   EThread *t = e ? e->ethread : this_ethread();

-  MUTEX_TRY_LOCK_FOR(lock, action.mutex, t, action.continuation);
+  MUTEX_TRY_LOCK(lock, action.mutex, t);
   if (!lock.is_locked()) {
     Debug("hostdb", "iterateEvent event=%d eventp=%p: reschedule due to not getting action mutex", event, e);
     mutex->thread_holding->schedule_in(this, HOST_DB_RETRY_PERIOD);
@@ -1550,7 +1550,7 @@
   if (current_iterate_pos < hostDB.refcountcache->partition_count()) {
     // TODO: configurable number at a time?
     ProxyMutex *bucket_mutex = hostDB.refcountcache->get_partition(current_iterate_pos).lock.get();
-    MUTEX_TRY_LOCK_FOR(lock_bucket, bucket_mutex, t, this);
+    MUTEX_TRY_LOCK(lock_bucket, bucket_mutex, t);
     if (!lock_bucket.is_locked()) {
       // we couldn't get the bucket lock, let's just reschedule and try later.
       Debug("hostdb", "iterateEvent event=%d eventp=%p: reschedule due to not getting bucket mutex", event, e);
@@ -1591,7 +1591,7 @@ HostDBContinuation::probeEvent(int /* event ATS_UNUSED */, Event *e)
   ink_assert(!link.prev && !link.next);
   EThread *t = e ? e->ethread : this_ethread();

-  MUTEX_TRY_LOCK_FOR(lock, action.mutex, t, action.continuation);
+  MUTEX_TRY_LOCK(lock, action.mutex, t);
   if (!lock.is_locked()) {
     mutex->thread_holding->schedule_in(this, HOST_DB_RETRY_PERIOD);
     return EVENT_CONT;
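Note: the dnsEvent hunk at -1494 keeps a two-level locking discipline: the action's own mutex is tried first, and, when the target continuation carries its own mutex, that one is tried as well before reply_to_cont is invoked; if either attempt fails, delivery is deferred. A condensed, annotated sketch of that control flow follows, assuming the surrounding dnsEvent context shown above; the final reschedule step is compressed here for illustration and is an assumption, the rest mirrors the hunk.

// Condensed sketch of the nested try-lock around reply_to_cont. The trailing
// reschedule is an assumption for illustration; everything else mirrors the hunk above.
bool need_to_reschedule = true;
MUTEX_TRY_LOCK(lock, action.mutex, thread);                      // level 1: the Action's mutex
if (lock.is_locked()) {
  need_to_reschedule = !action.cancelled;                        // a cancelled action needs no reply
  if (!action.cancelled) {
    if (action.continuation->mutex) {
      MUTEX_TRY_LOCK(lock2, action.continuation->mutex, thread); // level 2: the caller's mutex
      if (lock2.is_locked()) {
        reply_to_cont(action.continuation, r, is_srv());         // safe: both mutexes are held
        need_to_reschedule = false;
      }
    }
  }
}
if (need_to_reschedule) {
  thread->schedule_in(this, HOST_DB_RETRY_PERIOD);               // try delivering the reply later
}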
iocore/net/SSLNetVConnection.cc (2 changes: 1 addition & 1 deletion)

@@ -458,7 +458,7 @@ SSLNetVConnection::net_read_io(NetHandler *nh, EThread *lthread)
     return;
   }

-  MUTEX_TRY_LOCK_FOR(lock, s->vio.mutex, lthread, s->vio.cont);
+  MUTEX_TRY_LOCK(lock, s->vio.mutex, lthread);
   if (!lock.is_locked()) {
     readReschedule(nh);
     return;
iocore/net/UnixNetVConnection.cc (4 changes: 2 additions & 2 deletions)

@@ -188,7 +188,7 @@ read_from_net(NetHandler *nh, UnixNetVConnection *vc, EThread *thread)
   ProxyMutex *mutex = thread->mutex.get();
   int64_t r = 0;

-  MUTEX_TRY_LOCK_FOR(lock, s->vio.mutex, thread, s->vio.cont);
+  MUTEX_TRY_LOCK(lock, s->vio.mutex, thread);

   if (!lock.is_locked()) {
     read_reschedule(nh, vc);
@@ -367,7 +367,7 @@ write_to_net_io(NetHandler *nh, UnixNetVConnection *vc, EThread *thread)
   NetState *s = &vc->write;
   ProxyMutex *mutex = thread->mutex.get();

-  MUTEX_TRY_LOCK_FOR(lock, s->vio.mutex, thread, s->vio.cont);
+  MUTEX_TRY_LOCK(lock, s->vio.mutex, thread);

   if (!lock.is_locked() || lock.get_mutex() != s->vio.mutex.get()) {
     write_reschedule(nh, vc);
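Note: unlike the read path, the write path above rejects the lock in two cases: the try-lock failed outright, or the lock was obtained but s->vio.mutex no longer refers to the mutex that was captured, i.e. the VIO's mutex appears to have been swapped in the meantime. That interpretation of the second condition is an inference, not stated in this change; a small annotated restatement of the guard, with the trailing return added as an assumption for the sketch:

// Annotated restatement of the write-side guard shown above (same names as the hunk);
// the return statement is an assumption for this sketch.
MUTEX_TRY_LOCK(lock, s->vio.mutex, thread);
if (!lock.is_locked() ||                      // could not take the VIO mutex without blocking
    lock.get_mutex() != s->vio.mutex.get()) { // or the VIO now points at a different mutex
  write_reschedule(nh, vc);                   // either way, retry the write later
  return;
}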
iocore/net/UnixUDPNet.cc (2 changes: 1 addition & 1 deletion)

@@ -217,7 +217,7 @@ UDPNetProcessorInternal::udp_callback(UDPNetHandler *nh, UDPConnection *xuc, EThread *thread)
   UnixUDPConnection *uc = (UnixUDPConnection *)xuc;

   if (uc->continuation && uc->mutex) {
-    MUTEX_TRY_LOCK_FOR(lock, uc->mutex, thread, uc->continuation);
+    MUTEX_TRY_LOCK(lock, uc->mutex, thread);
     if (!lock.is_locked()) {
       return 1;
     }