path: root/open-vm-tools/lib/lock/ul.c
/*********************************************************
 * Copyright (C) 2009-2015 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation version 2.1 and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the Lesser GNU General Public
 * License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA.
 *
 *********************************************************/

#include "vmware.h"
#include "str.h"
#include "util.h"
#include "userlock.h"
#include "ulInt.h"
#include "ulIntShared.h"
#include "hashTable.h"
#include "random.h"

static Bool mxInPanic = FALSE;  // track when involved in a panic

Bool (*MXUserTryAcquireForceFail)() = NULL;

static MX_Rank (*MXUserMxCheckRank)(void) = NULL;
static void (*MXUserMxLockLister)(void) = NULL;
void (*MXUserMX_LockRec)(struct MX_MutexRec *lock) = NULL;
void (*MXUserMX_UnlockRec)(struct MX_MutexRec *lock) = NULL;
Bool (*MXUserMX_TryLockRec)(struct MX_MutexRec *lock) = NULL;
Bool (*MXUserMX_IsLockedByCurThreadRec)(const struct MX_MutexRec *lock) = NULL;
static void (*MXUserMX_SetInPanic)(void) = NULL;
static Bool (*MXUserMX_InPanic)(void) = NULL;

#define MXUSER_NOISE_FLOOR_TRIALS 1000


/*
 *-----------------------------------------------------------------------------
 *
 * MXUserInternalSingleton --
 *
 *      A "singleton" function for the MXUser internal recursive lock.
 *
 *      Internal MXUser recursive locks have no statistics gathering or
 *      tracking abilities. They need to be used with care and only rarely.
 *
 * Results:
 *      NULL    Failure
 *      !NULL   A pointer to an initialized MXRecLock
 *
 * Side effects:
 *      Manifold.
 *
 *-----------------------------------------------------------------------------
 */

MXRecLock *
MXUserInternalSingleton(Atomic_Ptr *storage)  // IN:
{
   MXRecLock *lock = (MXRecLock *) Atomic_ReadPtr(storage);

   if (UNLIKELY(lock == NULL)) {
      MXRecLock *newLock = Util_SafeMalloc(sizeof *newLock);

      if (MXRecLockInit(newLock)) {
         lock = Atomic_ReadIfEqualWritePtr(storage, NULL, (void *) newLock);

         if (lock) {
            MXRecLockDestroy(newLock);
            free(newLock);
         } else {
            lock = Atomic_ReadPtr(storage);
         }
      } else {
         free(newLock);
         lock = Atomic_ReadPtr(storage);  // maybe another thread succeeded
      }
   }

   return lock;
}
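

/*
 * Illustrative sketch (not part of this file): the usual way this singleton
 * is consumed inside MXUser. The static storage and the function name are
 * hypothetical; MXRecLockAcquire/MXRecLockRelease are used as elsewhere in
 * this file.
 *
 *    static Atomic_Ptr myLockMem;      // implicitly zero
 *
 *    static void
 *    SomeInternalPath(void)
 *    {
 *       MXRecLock *lock = MXUserInternalSingleton(&myLockMem);
 *
 *       ASSERT(lock != NULL);
 *
 *       MXRecLockAcquire(lock,
 *                        NULL);        // non-stats
 *
 *       ... critical section ...
 *
 *       MXRecLockRelease(lock);
 *    }
 */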


/*
 *-----------------------------------------------------------------------------
 *
 * MXUserSyndrome --
 *
 *      Generate the syndrome bits for this MXUser library.
 *
 *      Each MXUser library has unique syndrome bits enabling the run time
 *      detection of locks created with one copy of the MXUser library and
 *      passed to another copy of the MXUser library.
 *
 *      The syndrome bits are important as they prevent incompatible versions
 *      of the MXUser library from trashing each other.
 *
 *      The bits are generated by using a source of bits that is external to
 *      a program and its libraries. This way no code or data based scheme
 *      can be spoofed or aliased.
 *
 * Results:
 *      As above
 *
 * Side effects:
 *      None
 *
 *-----------------------------------------------------------------------------
 */

static uint32
MXUserSyndrome(void)
{
   uint32 syndrome;
   static Atomic_uint32 syndromeMem;  // implicitly zero -- mbellon

   syndrome = Atomic_Read(&syndromeMem);

   if (syndrome == 0) {
#if defined(_WIN32)
      syndrome = GetTickCount();
#else
      syndrome = time(NULL) & 0xFFFFFFFF;
#endif

      /*
       * Protect against a total failure.
       */

      if (syndrome == 0) {
         syndrome++;
      }

      /* blind write; if racing one thread or the other will do */
      Atomic_ReadIfEqualWrite(&syndromeMem, 0, syndrome);

      syndrome = Atomic_Read(&syndromeMem);
   }

   ASSERT(syndrome);

   return syndrome;
}


/*
 *-----------------------------------------------------------------------------
 *
 * MXUserGetSignature --
 *
 *      Return a signature appropriate for the specified object type.
 *
 * Results:
 *      As above
 *
 * Side effects:
 *      None
 *
 *-----------------------------------------------------------------------------
 */

uint32
MXUserGetSignature(MXUserObjectType objectType)  // IN:
{
   uint32 signature;

   ASSERT((objectType >= 0) && (objectType < 16) &&
          (objectType != MXUSER_TYPE_NEVER_USE));

   /*
    * Use a random syndrome combined with a unique bit pattern mapping
    * of objectType to bits in a nibble. The random portion of the signature
    * can be used to catch multiple copies of lib/lock that are "leaking"
    * locks between them (which may be incompatible due to internal changes).
    */

   signature = (MXUserSyndrome() & 0x0FFFFFFF) | (objectType << 28);

   ASSERT(signature);

   return signature;
}
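

/*
 * Illustrative sketch (not part of this file): how a signature produced above
 * breaks down. The local variable names are hypothetical; the layout follows
 * the expression in MXUserGetSignature.
 *
 *    uint32 signature = MXUserGetSignature(objectType);
 *
 *    MXUserObjectType type     = (MXUserObjectType) (signature >> 28);  // high nibble
 *    uint32           syndrome = signature & 0x0FFFFFFF;                // low 28 bits
 */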


/*
 *-----------------------------------------------------------------------------
 *
 * MXUserDumpAndPanic --
 *
 *      Dump a lock, print a message and die
 *
 * Results:
 *      A panic.
 *
 * Side effects:
 *      Manifold.
 *
 *-----------------------------------------------------------------------------
 */

void
MXUserDumpAndPanic(MXUserHeader *header,  // IN:
                   const char *fmt,       // IN:
                   ...)                   // IN:
{
   char *msg;
   va_list ap;

   ASSERT((header != NULL) && (header->dumpFunc != NULL));

   (*header->dumpFunc)(header);

   va_start(ap, fmt);
   msg = Str_SafeVasprintf(NULL, fmt, ap);
   va_end(ap);

   Panic("%s", msg);
}


/*
 *---------------------------------------------------------------------
 *
 *  MXUser_SetInPanic --
 *	Notify the locking system that a panic is occurring.
 *
 *  Results:
 *     Sets the "in a panic" state both in userland and, if applicable, in
 *     the monitor.
 *
 *  Side effects:
 *     None
 *
 *---------------------------------------------------------------------
 */

void
MXUser_SetInPanic(void)
{
   mxInPanic = TRUE;
   if (MXUserMX_SetInPanic != NULL) {
      MXUserMX_SetInPanic();
   }
}


/*
 *---------------------------------------------------------------------
 *
 *  MXUser_InPanic --
 *	Is the caller in the midst of a panic?
 *
 *  Results:
 *     TRUE   Yes
 *     FALSE  No
 *
 *  Side effects:
 *     None
 *
 *---------------------------------------------------------------------
 */

Bool
MXUser_InPanic(void)
{
   return mxInPanic || (MXUserMX_InPanic != NULL && MXUserMX_InPanic());
}


/*
 *-----------------------------------------------------------------------------
 *
 * MXUserInstallMxHooks --
 *
 *      The MX facility may notify the MXUser facility that it is in place and
 *      that MXUser should check with it. This function should be called from
 *      MX_Init.
 *
 * Results:
 *      As Above.
 *
 * Side effects:
 *      None.
 *
 *-----------------------------------------------------------------------------
 */

void
MXUserInstallMxHooks(void (*theLockListFunc)(void),
                     MX_Rank (*theRankFunc)(void),
                     void (*theLockFunc)(struct MX_MutexRec *lock),
                     void (*theUnlockFunc)(struct MX_MutexRec *lock),
                     Bool (*theTryLockFunc)(struct MX_MutexRec *lock),
                     Bool (*theIsLockedFunc)(const struct MX_MutexRec *lock),
                     void (*theSetInPanicFunc)(void),
                     Bool (*theInPanicFunc)(void))
{
   /*
    * This function can be called more than once but the second and later
    * invocations must be attempting to install the same hook functions as
    * the first invocation.
    */

   if ((MXUserMxLockLister == NULL) &&
       (MXUserMxCheckRank == NULL) &&
       (MXUserMX_LockRec == NULL) &&
       (MXUserMX_UnlockRec == NULL) &&
       (MXUserMX_TryLockRec == NULL) &&
       (MXUserMX_IsLockedByCurThreadRec == NULL) &&
       (MXUserMX_SetInPanic == NULL) &&
       (MXUserMX_InPanic == NULL)
       ) {
      MXUserMxLockLister = theLockListFunc;
      MXUserMxCheckRank = theRankFunc;
      MXUserMX_LockRec = theLockFunc;
      MXUserMX_UnlockRec = theUnlockFunc;
      MXUserMX_TryLockRec = theTryLockFunc;
      MXUserMX_IsLockedByCurThreadRec = theIsLockedFunc;
      MXUserMX_SetInPanic = theSetInPanicFunc;
      MXUserMX_InPanic = theInPanicFunc;
   } else {
      ASSERT((MXUserMxLockLister == theLockListFunc) &&
             (MXUserMxCheckRank == theRankFunc) &&
             (MXUserMX_LockRec == theLockFunc) &&
             (MXUserMX_UnlockRec == theUnlockFunc) &&
             (MXUserMX_TryLockRec == theTryLockFunc) &&
             (MXUserMX_IsLockedByCurThreadRec == theIsLockedFunc) &&
             (MXUserMX_SetInPanic == theSetInPanicFunc) &&
             (MXUserMX_InPanic == theInPanicFunc)
            );
   }
}
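

/*
 * Illustrative sketch (not part of this file): MX_Init would register its
 * hooks once, early, along these lines. All of the MX-side function names
 * shown here are hypothetical.
 *
 *    MXUserInstallMxHooks(MXLockListAll,              // lock lister
 *                         MXCurrentRank,              // highest-rank query
 *                         MX_LockRec,                 // lock
 *                         MX_UnlockRec,               // unlock
 *                         MX_TryLockRec,              // try-lock
 *                         MX_IsLockedByCurThreadRec,  // "is locked?" query
 *                         MX_PanicBegin,              // set "in panic"
 *                         MX_InPanic);                // query "in panic"
 */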

#if defined(MXUSER_DEBUG)
#define MXUSER_MAX_LOCKS_PER_THREAD (2 * MXUSER_MAX_REC_DEPTH)

typedef struct MXUserPerThread {
   struct MXUserPerThread  *next;
   uint32                   locksHeld;
   MXUserHeader            *lockArray[MXUSER_MAX_LOCKS_PER_THREAD];
} MXUserPerThread;

static Atomic_Ptr perThreadLockMem;
static MXUserPerThread *perThreadFreeList = NULL;

static Atomic_Ptr hashTableMem;


/*
 *-----------------------------------------------------------------------------
 *
 * MXUserAllocPerThread --
 *
 *     Allocate a perThread structure.
 *
 *     Memory is allocated for the specified thread as necessary. Use a
 *     victim cache in front of malloc to provide a slight performance
 *     advantage. The lock here is equivalent to the lock buried inside
 *     malloc but no complex calculations are necessary to perform an
 *     allocation most of the time.
 *
 *     The maximum size of the list will be roughly the maximum number of
 *     threads having taken locks at the same time - a bounded number less
 *     than or equal to the maximum number of threads created.
 *
 * Results:
 *     As above.
 *
 * Side effects:
 *      Memory may be allocated.
 *
 *-----------------------------------------------------------------------------
 */

static MXUserPerThread *
MXUserAllocPerThread(void)
{
   MXUserPerThread *perThread;
   MXRecLock *perThreadLock = MXUserInternalSingleton(&perThreadLockMem);

   ASSERT(perThreadLock);

   MXRecLockAcquire(perThreadLock,
                    NULL);          // non-stats

   if (perThreadFreeList == NULL) {
      perThread = Util_SafeMalloc(sizeof *perThread);
   } else {
      perThread = perThreadFreeList;
      perThreadFreeList = perThread->next;
   }

   MXRecLockRelease(perThreadLock);

   ASSERT(perThread);

   memset(perThread, 0, sizeof *perThread);  // ensure all zeros

   return perThread;
}


/*
 *-----------------------------------------------------------------------------
 *
 * MXUserFreePerThread --
 *
 *     Free a perThread structure.
 *
 *     The structure is placed on the free list -- for "later".
 *
 * Results:
 *     As above.
 *
 * Side effects:
 *      None
 *
 *-----------------------------------------------------------------------------
 */

static void
MXUserFreePerThread(MXUserPerThread *perThread)  // IN:
{
   MXRecLock *perThreadLock;

   ASSERT(perThread);
   ASSERT(perThread->next == NULL);

   perThreadLock = MXUserInternalSingleton(&perThreadLockMem);
   ASSERT(perThreadLock);

   MXRecLockAcquire(perThreadLock,
                    NULL);          // non-stats
   perThread->next = perThreadFreeList;
   perThreadFreeList = perThread;
   MXRecLockRelease(perThreadLock);
}


/*
 *-----------------------------------------------------------------------------
 *
 * MXUserGetPerThread --
 *
 *      Return a pointer to the per thread data for the specified thread.
 *
 *      Memory is allocated for the specified thread as necessary. This memory
 *      is never released since it is highly likely a thread will use a
 *      lock and need to record data in the perThread.
 *
 * Results:
 *      NULL   mayAlloc was FALSE and the thread doesn't have a perThread
 *     !NULL   the perThread of the specified thread
 *
 * Side effects:
 *      Memory may be allocated.
 *
 *-----------------------------------------------------------------------------
 */

static MXUserPerThread *
MXUserGetPerThread(Bool mayAlloc)  // IN: alloc perThread if not present?
{
   HashTable *hash;
   MXUserPerThread *perThread = NULL;
   void *tid = MXUserCastedThreadID();

   hash = HashTable_AllocOnce(&hashTableMem, 1024,
                              HASH_INT_KEY | HASH_FLAG_ATOMIC, NULL);

   if (!HashTable_Lookup(hash, tid, (void **) &perThread)) {
      /* No entry for this tid was found, allocate one? */

      if (mayAlloc) {
         MXUserPerThread *newEntry = MXUserAllocPerThread();

         /*
          * Attempt to insert (racily) a perThread on behalf of the specified
          * thread. If another thread takes care of this first, clean up
          * the mess.
          */

         perThread = HashTable_LookupOrInsert(hash, tid, newEntry);
         ASSERT(perThread);

         if (perThread != newEntry) {
            MXUserFreePerThread(newEntry);
         }
      } else {
         perThread = NULL;
      }
   }

   return perThread;
}

/*
 *-----------------------------------------------------------------------------
 *
 * MXUserListLocks --
 *
 *      Allow a caller to list, via warnings, the list of locks the caller
 *      has acquired. Ensure that no memory for lock tracking is allocated
 *      if no locks have been taken.
 *
 * Results:
 *      The list is printed
 *
 * Side effects:
 *      None
 *
 *-----------------------------------------------------------------------------
 */

void
MXUserListLocks(void)
{
   MXUserPerThread *perThread = MXUserGetPerThread(FALSE);

   if (perThread != NULL) {
      uint32 i;

      for (i = 0; i < perThread->locksHeld; i++) {
         MXUserHeader *hdr = perThread->lockArray[i];

         Warning("\tMXUser lock %s (@0x%p) rank 0x%x\n", hdr->name, hdr,
                 hdr->rank);
      }
   }
}


/*
 *-----------------------------------------------------------------------------
 *
 * MXUser_IsCurThreadHoldingLocks --
 *
 *      Are any MXUser locks held by the calling thread?
 *
 * Results:
 *      TRUE   Yes
 *      FALSE  No
 *
 * Side effects:
 *      None
 *
 *-----------------------------------------------------------------------------
 */

Bool
MXUser_IsCurThreadHoldingLocks(void)
{
   MXUserPerThread *perThread = MXUserGetPerThread(FALSE);

   return (perThread == NULL) ? FALSE : (perThread->locksHeld != 0);
}
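

/*
 * Illustrative sketch (not part of this file): a typical debug-build use of
 * the query above is to assert that a thread holds no MXUser locks before it
 * blocks indefinitely or exits.
 *
 *    ASSERT(!MXUser_IsCurThreadHoldingLocks());
 */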


/*
 *-----------------------------------------------------------------------------
 *
 * MXUserThreadRank --
 *
 *      Return the highest rank held by the specified thread via MXUser locks.
 *
 * Results:
 *      As above
 *
 * Side effects:
 *      Can optionally determine if a lock has been locked before.
 *
 *-----------------------------------------------------------------------------
 */

static MX_Rank
MXUserThreadRank(MXUserPerThread *perThread,  // IN:
                 MXUserHeader *header,        // IN:
                 Bool *firstUse)              // OUT:
{
   uint32 i;
   Bool foundOnce = TRUE;
   MX_Rank maxRank = RANK_UNRANKED;

   ASSERT(perThread);

   /*
    * Determine the maximum rank held. Note if the lock being acquired
    * was previously entered into the tracking system.
    */

   for (i = 0; i < perThread->locksHeld; i++) {
      MXUserHeader *chkHdr = perThread->lockArray[i];

      maxRank = MAX(chkHdr->rank, maxRank);

      if (chkHdr == header) {
         foundOnce = FALSE;
      }
   }

   if (firstUse) {
      *firstUse = foundOnce;
   }

   return maxRank;
}


/*
 *-----------------------------------------------------------------------------
 *
 * MXUserCurrentRank --
 *
 *      Return the highest rank held by the current thread via MXUser locks.
 *
 * Results:
 *      As above
 *
 * Side effects:
 *      None
 *
 *-----------------------------------------------------------------------------
 */

MX_Rank
MXUserCurrentRank(void)
{
   MX_Rank maxRank;
   MXUserPerThread *perThread = MXUserGetPerThread(FALSE);

   if (perThread == NULL) {
      maxRank = RANK_UNRANKED;
   } else {
      maxRank = MXUserThreadRank(perThread, NULL, NULL);
   }

   return maxRank;
}


/*
 *-----------------------------------------------------------------------------
 *
 * MXUserAcquisitionTracking --
 *
 *      Perform the appropriate tracking for lock acquisition.
 *
 * Results:
 *      Panic when a rank violation is detected (checkRank is TRUE).
 *      Otherwise the lock instance is added to the perThread lock list.
 *
 * Side effects:
 *      Manifold.
 *
 *-----------------------------------------------------------------------------
 */

void
MXUserAcquisitionTracking(MXUserHeader *header,  // IN:
                          Bool checkRank)        // IN:
{
   MXUserPerThread *perThread = MXUserGetPerThread(TRUE);

   VERIFY(perThread->locksHeld < MXUSER_MAX_LOCKS_PER_THREAD);

   /*
    * Rank checking anyone?
    *
    * Rank checking is abandoned once we're in a panic situation. This will
    * improve the chances of obtaining a good log and/or coredump.
    */

   if (checkRank && (header->rank != RANK_UNRANKED) && !MXUser_InPanic()) {
      MX_Rank maxRank;
      Bool firstInstance = TRUE;

      /*
       * Determine the highest rank held by the calling thread. Check for
       * MX locks if they are present.
       */

      maxRank = MXUserThreadRank(perThread, header, &firstInstance);

      if (MXUserMxCheckRank) {
         maxRank = MAX(maxRank, (*MXUserMxCheckRank)());
      }

      /*
       * Perform rank checking when a lock is entered into the tracking
       * system for the first time. This works out well because:
       *
       * Recursive locks are rank checked only upon their first acquisition...
       * just like MX locks.
       *
       * Exclusive locks will have a second entry added into the tracking
       * system but will immediately panic due to the run time checking - no
       * (real) harm done.
       */

      if (firstInstance && (header->rank <= maxRank)) {
         Warning("%s: lock rank violation by thread %s\n", __FUNCTION__,
                 VThread_CurName());
         Warning("%s: locks held:\n", __FUNCTION__);

         if (MXUserMxLockLister) {
            (*MXUserMxLockLister)();
         }

         MXUserListLocks();

         MXUserDumpAndPanic(header, "%s: rank violation maxRank=0x%x\n",
                            __FUNCTION__, maxRank);
      }
   }

   /* Add lock instance to the calling threads perThread information */
   perThread->lockArray[perThread->locksHeld++] = header;
}


/*
 *-----------------------------------------------------------------------------
 *
 * MXUserReleaseTracking --
 *
 *      Perform the appropriate tracking for lock release.
 *
 * Results:
 *      A panic if the lock is not being tracked for the calling thread;
 *      otherwise the lock is removed from the thread's tracking list.
 *
 * Side effects:
 *      Manifold.
 *
 *-----------------------------------------------------------------------------
 */

void
MXUserReleaseTracking(MXUserHeader *header)  // IN: lock, via its header
{
   uint32 i;
   uint32 lastEntry;
   MXUserPerThread *perThread = MXUserGetPerThread(FALSE);

   /* MXUserAcquisitionTracking should have already created a perThread */
   if (UNLIKELY(perThread == NULL)) {
      MXUserDumpAndPanic(header, "%s: perThread not found! (thread 0x%p)\n",
                         __FUNCTION__, MXUserCastedThreadID());
   }

   /* Search the perThread for the argument lock */
   for (i = 0; i < perThread->locksHeld; i++) {
      if (perThread->lockArray[i] == header) {
         break;
      }
   }

   /* The argument lock had better be in the perThread */
   if (UNLIKELY(i >= perThread->locksHeld)) {
      MXUserDumpAndPanic(header,
                         "%s: lock not found! (thread 0x%p; count %u)\n",
                         __FUNCTION__, MXUserCastedThreadID(),
                         perThread->locksHeld);
   }

   /* Remove the argument lock from the perThread */
   lastEntry = perThread->locksHeld - 1;

   if (i < lastEntry) {
      perThread->lockArray[i] = perThread->lockArray[lastEntry];
   }

   perThread->lockArray[lastEntry] = NULL;  // tidy up memory
   perThread->locksHeld--;
}
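

/*
 * Illustrative sketch (not part of this file): a lock implementation would
 * bracket the underlying acquire/release with the tracking calls above. The
 * "lock" layout (header, recursiveLock members) is hypothetical.
 *
 *    MXUserAcquisitionTracking(&lock->header, TRUE);   // rank check + track
 *    MXRecLockAcquire(&lock->recursiveLock,
 *                     NULL);                           // non-stats
 *
 *    ... critical section ...
 *
 *    MXUserReleaseTracking(&lock->header);             // untrack
 *    MXRecLockRelease(&lock->recursiveLock);
 */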


/*
 *-----------------------------------------------------------------------------
 *
 * MXUser_TryAcquireFailureControl --
 *
 *      Should a TryAcquire operation fail, no matter "what", sometimes?
 *
 *      Failures occur statistically in debug builds to force our code down
 *      all of its paths.
 *
 * Results:
 *      Unknown
 *
 * Side effects:
 *      Always entertaining...
 *
 *-----------------------------------------------------------------------------
 */

void
MXUser_TryAcquireFailureControl(Bool (*func)(const char *name))  // IN:
{
   MXUserTryAcquireForceFail = func;
}
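

/*
 * Illustrative sketch (not part of this file): a debug build might install a
 * hook that forces a small fraction of TryAcquire operations to fail. The
 * function name and failure rate are hypothetical; rand() needs <stdlib.h>.
 *
 *    static Bool
 *    FailOnePercent(const char *name)
 *    {
 *       (void) name;                    // lock name unused in this sketch
 *
 *       return (rand() % 100) == 0;     // ~1% forced failures
 *    }
 *
 *    MXUser_TryAcquireFailureControl(FailOnePercent);
 */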


/*
 *-----------------------------------------------------------------------------
 *
 * MXUserValidateHeader --
 *
 *      Validate an MXUser object header
 *
 * Results:
 *      Return  All is well
 *      Panic   All is NOT well
 *
 * Side effects:
 *      Always entertaining...
 *
 *-----------------------------------------------------------------------------
 */

void
MXUserValidateHeader(MXUserHeader *header,         // IN:
                     MXUserObjectType objectType)  // IN:
{
   uint32 expected = MXUserGetSignature(objectType);

   if (header->signature != expected) {
      MXUserDumpAndPanic(header,
                        "%s: signature failure! expected 0x%X observed 0x%X\n",
                         __FUNCTION__, expected, header->signature);
   }

   if (header->serialNumber == 0) {
      MXUserDumpAndPanic(header, "%s: Invalid serial number!", __FUNCTION__);
   }
}
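

/*
 * Illustrative sketch (not part of this file): a debug-only consistency check
 * at the top of a lock operation. The MXUSER_TYPE_EXCL enumerator and the
 * lock layout are hypothetical.
 *
 *    MXUserValidateHeader(&lock->header, MXUSER_TYPE_EXCL);
 */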
#endif