1
16
17 package net.sf.ehcache.store.disk;
18
19 import net.sf.ehcache.Cache;
20 import net.sf.ehcache.CacheEntry;
21 import net.sf.ehcache.CacheException;
22 import net.sf.ehcache.Ehcache;
23 import net.sf.ehcache.Element;
24 import net.sf.ehcache.Status;
25 import net.sf.ehcache.concurrent.CacheLockProvider;
26 import net.sf.ehcache.concurrent.LockType;
27 import net.sf.ehcache.concurrent.StripedReadWriteLock;
28 import net.sf.ehcache.concurrent.Sync;
29 import net.sf.ehcache.config.CacheConfiguration;
30 import net.sf.ehcache.config.CacheConfigurationListener;
31 import net.sf.ehcache.config.PinningConfiguration;
32 import net.sf.ehcache.config.SizeOfPolicyConfiguration;
33 import net.sf.ehcache.pool.Pool;
34 import net.sf.ehcache.pool.PoolAccessor;
35 import net.sf.ehcache.pool.PoolableStore;
36 import net.sf.ehcache.pool.impl.UnboundedPool;
37 import net.sf.ehcache.store.AbstractStore;
38 import net.sf.ehcache.store.ElementValueComparator;
39 import net.sf.ehcache.store.Policy;
40 import net.sf.ehcache.store.StripedReadWriteLockProvider;
41 import net.sf.ehcache.store.TierableStore;
42 import net.sf.ehcache.store.disk.DiskStorageFactory.DiskMarker;
43 import net.sf.ehcache.store.disk.DiskStorageFactory.DiskSubstitute;
44 import net.sf.ehcache.store.disk.DiskStorageFactory.Placeholder;
45 import net.sf.ehcache.writer.CacheWriterManager;
46
import java.io.File;
import java.io.IOException;
import java.io.Serializable;
import java.util.AbstractSet;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.Random;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
62
63
/**
 * A disk-backed store. Entries are spread over a fixed array of {@link Segment}s
 * (ConcurrentHashMap-style striping); serialization and file management are delegated
 * to a {@link DiskStorageFactory}. Thread-safe: each segment has its own read/write lock.
 */
public final class DiskStore extends AbstractStore implements TierableStore, PoolableStore, StripedReadWriteLockProvider {

    // Constants used by hash() to spread raw hashCodes; named after the literal they hold
    // (presumably to satisfy a magic-number checkstyle rule).
    private static final int FFFFCD7D = 0xffffcd7d;
    private static final int FIFTEEN = 15;
    private static final int TEN = 10;
    private static final int THREE = 3;
    private static final int SIX = 6;
    private static final int FOURTEEN = 14;
    private static final int SIXTEEN = 16;

    // Unlocked getSize() attempts before falling back to locking every segment.
    private static final int RETRIES_BEFORE_LOCK = 2;
    private static final int DEFAULT_INITIAL_CAPACITY = 16;
    // Must be a power of two: segment selection via (hash >>> segmentShift) assumes it.
    private static final int DEFAULT_SEGMENT_COUNT = 64;
    private static final float DEFAULT_LOAD_FACTOR = 0.75f;
    // NOTE(review): appears unused within this file — confirm before removing.
    private static final int SLEEP_INTERVAL_MS = 10;

    private final DiskStorageFactory disk;
    private final Random rndm = new Random();
    private final Segment[] segments;
    // Right-shift applied to a spread hash to pick a segment index in [0, segments.length).
    private final int segmentShift;
    private final AtomicReference<Status> status = new AtomicReference<Status>(Status.STATUS_UNINITIALISED);
    // True when the cache configuration pins the whole tier INCACHE.
    private final boolean tierPinned;
    private final boolean persistent;

    // Lazily initialised; races are benign since duplicate instances are equivalent.
    private volatile CacheLockProvider lockProvider;
    private volatile Set<Object> keySet;
    private volatile PoolAccessor onHeapPoolAccessor;
    private volatile PoolAccessor onDiskPoolAccessor;
100
/**
 * Creates a store over the given storage factory, with its footprint accounted by the two pools.
 *
 * @param disk       backing storage factory; this store binds itself to it before going ALIVE
 * @param cache      cache whose configuration drives segment, pinning and persistence setup
 * @param onHeapPool pool accounting this store's on-heap footprint
 * @param onDiskPool pool accounting this store's on-disk footprint
 */
private DiskStore(DiskStorageFactory disk, Ehcache cache, Pool onHeapPool, Pool onDiskPool) {
    this.segments = new Segment[DEFAULT_SEGMENT_COUNT];
    // With a power-of-two segment count, (hash >>> segmentShift) yields a valid index.
    this.segmentShift = Integer.numberOfLeadingZeros(segments.length - 1);
    this.onHeapPoolAccessor = onHeapPool.createPoolAccessor(this,
            SizeOfPolicyConfiguration.resolveMaxDepth(cache),
            SizeOfPolicyConfiguration.resolveBehavior(cache).equals(SizeOfPolicyConfiguration.MaxDepthExceededBehavior.ABORT));
    this.onDiskPoolAccessor = onDiskPool.createPoolAccessor(this, new DiskSizeOfEngine());

    for (int i = 0; i < this.segments.length; ++i) {
        this.segments[i] = new Segment(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR,
                disk, cache.getCacheConfiguration(), onHeapPoolAccessor, onDiskPoolAccessor, cache.getCacheEventNotificationService());
    }

    this.disk = disk;
    this.disk.bind(this);
    this.status.set(Status.STATUS_ALIVE);
    this.tierPinned = cache.getCacheConfiguration().getPinningConfiguration() != null &&
            cache.getCacheConfiguration().getPinningConfiguration().getStore() == PinningConfiguration.Store.INCACHE;
    this.persistent = cache.getCacheConfiguration().isDiskPersistent();
}
121
122
130 public static DiskStore create(Ehcache cache, Pool onHeapPool, Pool onDiskPool) {
131 if (cache.getCacheManager() == null) {
132 throw new CacheException("Can't create diskstore without a cache manager");
133 }
134 DiskStorageFactory disk = new DiskStorageFactory(cache, cache.getCacheEventNotificationService());
135 DiskStore store = new DiskStore(disk, cache, onHeapPool, onDiskPool);
136 cache.getCacheConfiguration().addConfigurationListener(new CacheConfigurationListenerAdapter(disk, onDiskPool));
137 return store;
138 }
139
140
/**
 * Convenience factory: creates a DiskStore whose heap and disk usage are not pool-limited.
 */
public static DiskStore create(Cache cache) {
    return create(cache, new UnboundedPool(), new UnboundedPool());
}
150
151
/**
 * No-op: per-key pinning is not supported by the disk tier.
 */
public void unpinAll() {

}
157
158
/**
 * Always {@code false}: per-key pinning is not supported by the disk tier.
 */
public boolean isPinned(Object key) {
    return false;
}
164
165
/**
 * No-op: per-key pinning is not supported by the disk tier.
 */
public void setPinned(Object key, boolean pinned) {

}
171
172
173
/**
 * Removes the failed-write marker for the given key from its segment, if present.
 *
 * @return whether a marker was cleaned up (delegated to the owning segment)
 */
public boolean cleanUpFailedMarker(final Serializable key) {
    int hash = hash(key.hashCode());
    return segmentFor(hash).cleanUpFailedMarker(key, hash);
}
183
184
/**
 * Creates a striped lock with one stripe per segment, keyed consistently with this store.
 */
public StripedReadWriteLock createStripedReadWriteLock() {
    return new DiskStoreStripedReadWriteLock();
}
190
191
/**
 * Routes runtime cache-configuration changes to the disk layer. Only the disk-capacity
 * and max-bytes-local-disk events are acted upon; all other callbacks are deliberate no-ops.
 */
private static final class CacheConfigurationListenerAdapter implements CacheConfigurationListener {

    private final DiskStorageFactory disk;
    private final Pool diskPool;

    private CacheConfigurationListenerAdapter(DiskStorageFactory disk, Pool diskPool) {
        this.disk = disk;
        this.diskPool = diskPool;
    }

    /** No-op: TTI does not affect disk sizing. */
    public void timeToIdleChanged(long oldTimeToIdle, long newTimeToIdle) {

    }

    /** No-op: TTL does not affect disk sizing. */
    public void timeToLiveChanged(long oldTimeToLive, long newTimeToLive) {

    }

    /** Propagates the new entry-count capacity to the disk storage factory. */
    public void diskCapacityChanged(int oldCapacity, int newCapacity) {
        disk.setOnDiskCapacity(newCapacity);
    }

    /** No-op: memory capacity is not this tier's concern. */
    public void memoryCapacityChanged(int oldCapacity, int newCapacity) {

    }

    /** No-op. */
    public void loggingChanged(boolean oldValue, boolean newValue) {

    }

    /** No-op. */
    public void registered(CacheConfiguration config) {

    }

    /** No-op. */
    public void deregistered(CacheConfiguration config) {

    }

    /** No-op: heap byte limits are not this tier's concern. */
    public void maxBytesLocalHeapChanged(final long oldValue, final long newValue) {

    }

    /** Resizes the disk pool to the new byte limit. */
    public void maxBytesLocalDiskChanged(final long oldValue, final long newValue) {
        diskPool.setMaxSize(newValue);
    }
}
267
268
/**
 * Changes the maximum number of elements the disk tier may hold.
 */
public void changeDiskCapacity(int newCapacity) {
    disk.setOnDiskCapacity(newCapacity);
}
275
276
/**
 * @return whether the disk write-behind buffer is currently full (delegated to the factory)
 */
public boolean bufferFull() {
    return disk.bufferFull();
}
282
283
/**
 * Always {@code false}: this tier keeps nothing in the memory tier.
 */
public boolean containsKeyInMemory(Object key) {
    return false;
}
289
290
/**
 * Always {@code false}: this tier keeps nothing off-heap.
 */
public boolean containsKeyOffHeap(Object key) {
    return false;
}
296
297
/**
 * Disk is this store's only tier, so "on disk" is simply {@link #containsKey(Object)}.
 */
public boolean containsKeyOnDisk(Object key) {
    return containsKey(key);
}
303
304
/**
 * Asks the disk layer to sweep and evict expired elements.
 */
public void expireElements() {
    disk.expireElements();
}
310
311
/**
 * Flushes pending writes to disk.
 *
 * @throws IOException on flush failure in the disk layer
 */
public void flush() throws IOException {
    disk.flush();
}
317
318
/**
 * Always {@code null}: there is no in-memory eviction policy on the disk tier.
 */
public Policy getInMemoryEvictionPolicy() {
    return null;
}
324
325
/**
 * Always {@code 0}: this tier holds no elements in memory.
 */
public int getInMemorySize() {
    return 0;
}
331
332
335 public long getInMemorySizeInBytes() {
336 long size = onHeapPoolAccessor.getSize();
337 if (size < 0) {
338 return 0;
339 } else {
340 return size;
341 }
342 }
343
344
/**
 * Always {@code 0}: this tier holds nothing off-heap.
 */
public int getOffHeapSize() {
    return 0;
}
350
351
/**
 * Always {@code 0}: this tier holds nothing off-heap.
 */
public long getOffHeapSizeInBytes() {
    return 0;
}
357
358
/**
 * @return the number of elements currently stored on disk (delegated to the factory)
 */
public int getOnDiskSize() {
    return disk.getOnDiskSize();
}
364
365
368 public long getOnDiskSizeInBytes() {
369 long size = onDiskPoolAccessor.getSize();
370 if (size < 0) {
371 return disk.getOnDiskSizeInBytes();
372 } else {
373 return size;
374 }
375 }
376
377
/**
 * Always {@code 0}: this store is not Terracotta-clustered.
 */
public int getTerracottaClusteredSize() {
    return 0;
}
383
384
/**
 * No-op: the disk tier has no in-memory eviction policy to set.
 */
public void setInMemoryEvictionPolicy(Policy policy) {
}
389
390
/**
 * @return the data file backing this store (delegated to the factory)
 */
public File getDataFile() {
    return disk.getDataFile();
}
398
399
/**
 * @return the index file backing this store (delegated to the factory)
 */
public File getIndexFile() {
    return disk.getIndexFile();
}
407
408
/**
 * Always {@code null}: this store exposes no management bean.
 */
public Object getMBean() {
    return null;
}
414
415
/**
 * Tier fill operation: for the disk tier this is just a plain put.
 */
public void fill(Element e) {
    put(e);
}
421
422
/**
 * Removes the mapping unless this whole tier is pinned.
 *
 * @return true when the tier is not pinned and a mapping was removed
 */
public boolean removeIfNotPinned(final Object key) {
    return !tierPinned && remove(key) != null;
}
428
429
432 public boolean put(Element element) {
433 if (element == null) {
434 return false;
435 } else {
436 Object key = element.getObjectKey();
437 int hash = hash(key.hashCode());
438 Element oldElement = segmentFor(hash).put(key, hash, element, false);
439 return oldElement == null;
440 }
441 }
442
443
/**
 * Puts the element, then notifies the writer manager. The element is already stored
 * when the writer is invoked, so a writer failure is reported as a StoreUpdateException
 * carrying whether the store change was an update (not a fresh insert).
 */
public boolean putWithWriter(Element element, CacheWriterManager writerManager) {
    boolean newPut = put(element);
    if (writerManager != null) {
        try {
            writerManager.put(element);
        } catch (RuntimeException e) {
            // !newPut == "there was a previous mapping", i.e. this was an update.
            throw new StoreUpdateException(e, !newPut);
        }
    }
    return newPut;
}
457
458
461 public Element get(Object key) {
462 if (key == null) {
463 return null;
464 }
465
466 int hash = hash(key.hashCode());
467 return segmentFor(hash).get(key, hash);
468 }
469
470
/**
 * Same as {@link #get(Object)} — this store does not track per-get statistics separately.
 */
public Element getQuiet(Object key) {
    return get(key);
}
476
477
483 public Object unretrievedGet(Object key) {
484 if (key == null) {
485 return null;
486 }
487
488 int hash = hash(key.hashCode());
489 return segmentFor(hash).unretrievedGet(key, hash);
490 }
491
492
/**
 * Installs an already-encoded disk marker for the key only if no mapping exists.
 *
 * @return whether the raw marker was installed
 * @throws IllegalArgumentException propagated from the segment
 */
public boolean putRawIfAbsent(Object key, DiskMarker encoded) throws IllegalArgumentException {
    int hash = hash(key.hashCode());
    return segmentFor(hash).putRawIfAbsent(key, hash, encoded);
}
504
505
508 public List getKeys() {
509 return new ArrayList(keySet());
510 }
511
512
517 public Set<Object> keySet() {
518 if (keySet != null) {
519 return keySet;
520 } else {
521 keySet = new KeySet();
522 return keySet;
523 }
524 }
525
526
529 public Element remove(Object key) {
530 if (key == null) {
531 return null;
532 }
533
534 int hash = hash(key.hashCode());
535 return segmentFor(hash).remove(key, hash, null, null);
536 }
537
538
541 public void removeNoReturn(Object key) {
542 if (key != null) {
543 int hash = hash(key.hashCode());
544 segmentFor(hash).removeNoReturn(key, hash);
545 }
546 }
547
548
/**
 * @return whether the whole disk tier is pinned via the cache's pinning configuration
 */
public boolean isTierPinned() {
    return tierPinned;
}
554
555
/**
 * Always empty: per-key pinning is not supported by the disk tier.
 */
public Set getPresentPinnedKeys() {
    return Collections.emptySet();
}
561
562
/**
 * @return whether the cache was configured disk-persistent
 */
public boolean isPersistent() {
    return persistent;
}
568
569
/**
 * Removes the mapping, then notifies the writer manager of the removal.
 * The writer is invoked even when nothing was removed (removed may be null).
 */
public Element removeWithWriter(Object key, CacheWriterManager writerManager) {
    Element removed = remove(key);
    if (writerManager != null) {
        writerManager.remove(new CacheEntry(key, removed));
    }
    return removed;
}
579
580
583 public void removeAll() {
584 for (Segment s : segments) {
585 s.clear();
586 }
587 }
588
589
/**
 * Shuts the store down exactly once: transitions ALIVE -> SHUTDOWN, unbinds the disk
 * layer and unlinks both pool accessors. Subsequent calls are no-ops.
 */
public void dispose() {
    if (status.compareAndSet(Status.STATUS_ALIVE, Status.STATUS_SHUTDOWN)) {
        disk.unbind();
        onHeapPoolAccessor.unlink();
        onDiskPoolAccessor.unlink();
    }
}
599
600
/**
 * Total element count, computed ConcurrentHashMap-style: a few optimistic unlocked
 * passes validated by segment modCounts, then a fully locked count if those fail.
 * Saturates at Integer.MAX_VALUE.
 */
public int getSize() {
    final Segment[] segs = this.segments;
    long size = -1;

    // Optimistic attempts; volatileSize returns -1 when a concurrent modification was seen.
    for (int k = 0; k < RETRIES_BEFORE_LOCK; ++k) {
        size = volatileSize(segs);
        if (size >= 0) {
            break;
        }
    }
    if (size < 0) {
        // Fall back to locking every segment for an exact count.
        size = lockedSize(segs);
    }
    if (size > Integer.MAX_VALUE) {
        return Integer.MAX_VALUE;
    } else {
        return (int) size;
    }
}
624
/**
 * Lock-free size attempt: sums segment counts while snapshotting modCounts, then
 * re-reads to detect concurrent modification. Returns -1 when the snapshot is not
 * stable (a modCount changed or the two sums disagree). Statement order is load-bearing.
 */
private static long volatileSize(Segment[] segs) {
    int[] mc = new int[segs.length];
    long check = 0;
    long sum = 0;
    int mcsum = 0;
    for (int i = 0; i < segs.length; ++i) {
        sum += segs[i].count;
        mc[i] = segs[i].modCount;
        mcsum += mc[i];
    }
    // mcsum == 0 means no segment was ever modified, so the first pass is trivially stable.
    if (mcsum != 0) {
        for (int i = 0; i < segs.length; ++i) {
            check += segs[i].count;
            if (mc[i] != segs[i].modCount) {
                return -1;
            }
        }
    }
    if (check == sum) {
        return sum;
    } else {
        return -1;
    }
}
649
650 private static long lockedSize(Segment[] segs) {
651 long size = 0;
652 for (Segment seg : segs) {
653 seg.readLock().lock();
654 }
655 for (Segment seg : segs) {
656 size += seg.count;
657 }
658 for (Segment seg : segs) {
659 seg.readLock().unlock();
660 }
661
662 return size;
663 }
664
665
/**
 * @return the current lifecycle status (ALIVE after construction, SHUTDOWN after dispose)
 */
public Status getStatus() {
    return status.get();
}
671
672
/**
 * Pool callback asking this store to free heap. Delegates to disk eviction
 * (the {@code size} hint is ignored).
 *
 * @return whether the requested number of evictions was performed
 */
public boolean evictFromOnHeap(int count, long size) {

    return disk.evict(count) == count;
}
679
680
/**
 * Pool callback asking this store to free disk space; the {@code size} hint is ignored.
 *
 * @return whether the requested number of evictions was performed
 */
public boolean evictFromOnDisk(int count, long size) {
    return disk.evict(count) == count;
}
686
687
690 public float getApproximateDiskHitRate() {
691 float sum = 0;
692 for (Segment s : segments) {
693 sum += s.getDiskHitRate();
694 }
695 return sum;
696 }
697
698
701 public float getApproximateDiskMissRate() {
702 float sum = 0;
703 for (Segment s : segments) {
704 sum += s.getDiskMissRate();
705 }
706 return sum;
707 }
708
709
/**
 * @return the approximate number of elements on disk
 */
public long getApproximateDiskCountSize() {
    return getOnDiskSize();
}
715
716
/**
 * @return the approximate byte footprint on disk
 */
public long getApproximateDiskByteSize() {
    return getOnDiskSizeInBytes();
}
722
723
/**
 * Always {@code 0}: this tier serves no hits from heap.
 */
public float getApproximateHeapHitRate() {
    return 0;
}
729
730
/**
 * Always {@code 0}: this tier serves no misses from heap.
 */
public float getApproximateHeapMissRate() {
    return 0;
}
736
737
/**
 * @return the approximate number of heap-resident elements (always 0 for this tier)
 */
public long getApproximateHeapCountSize() {
    return getInMemorySize();
}
743
744
/**
 * @return the approximate heap byte footprint of this store's bookkeeping
 */
public long getApproximateHeapByteSize() {
    return getInMemorySizeInBytes();
}
750
751
754 public boolean containsKey(Object key) {
755 int hash = hash(key.hashCode());
756 return segmentFor(hash).containsKey(key, hash);
757 }
758
759
762 public Object getInternalContext() {
763 if (lockProvider != null) {
764 return lockProvider;
765 } else {
766 lockProvider = new LockProvider();
767 return lockProvider;
768 }
769 }
770
771
/**
 * Puts the element only if no mapping exists for its key.
 *
 * @return the previously mapped element, or {@code null} if the put succeeded
 * @throws NullPointerException if the element (or its key) is null
 */
public Element putIfAbsent(Element element) throws NullPointerException {
    Object key = element.getObjectKey();
    int hash = hash(key.hashCode());
    return segmentFor(hash).put(key, hash, element, true);
}
779
780
/**
 * Removes the mapping only if the stored element compares equal (per the comparator)
 * to the given one.
 *
 * @return the removed element, or {@code null} if nothing matched
 * @throws NullPointerException if the element (or its key) is null
 */
public Element removeElement(Element element, ElementValueComparator comparator) throws NullPointerException {
    Object key = element.getObjectKey();
    int hash = hash(key.hashCode());
    return segmentFor(hash).remove(key, hash, element, comparator);
}
788
789
/**
 * Replaces the mapping only if the stored element compares equal to {@code old}.
 * The segment is selected by the NEW element's key.
 *
 * @return whether the replacement happened
 */
public boolean replace(Element old, Element element, ElementValueComparator comparator)
        throws NullPointerException, IllegalArgumentException {
    Object key = element.getObjectKey();
    int hash = hash(key.hashCode());
    return segmentFor(hash).replace(key, hash, old, element, comparator);
}
798
799
/**
 * Replaces the mapping for the element's key only if one already exists.
 *
 * @return the previously mapped element, or {@code null} if there was no mapping
 * @throws NullPointerException if the element (or its key) is null
 */
public Element replace(Element element) throws NullPointerException {
    Object key = element.getObjectKey();
    int hash = hash(key.hashCode());
    return segmentFor(hash).replace(key, hash, element);
}
807
808
/**
 * Atomically swaps a placeholder for its completed disk marker in the owning segment.
 *
 * @return whether the expected placeholder was present and faulted to the marker
 */
public boolean fault(Object key, Placeholder expect, DiskMarker fault) {
    int hash = hash(key.hashCode());
    return segmentFor(hash).fault(key, hash, expect, fault);
}
825
826
/**
 * Evicts the mapping if it still points at the given substitute.
 *
 * @return whether an element was evicted
 */
public boolean evict(Object key, DiskSubstitute substitute) {
    return evictElement(key, substitute) != null;
}
837
838
/**
 * Evicts the mapping if it still points at the given substitute.
 *
 * @return the evicted element, or {@code null} if nothing was evicted
 */
public Element evictElement(Object key, DiskSubstitute substitute) {
    int hash = hash(key.hashCode());
    return segmentFor(hash).evict(key, hash, substitute);
}
850
851
/**
 * Collects up to {@code sampleSize} substitutes matching the filter, walking the
 * segment array as a ring starting from either a random segment or the one that
 * would hold {@code keyHint}.
 *
 * @param factory    filter selecting which substitutes qualify
 * @param sampleSize maximum number of substitutes to collect
 * @param keyHint    optional key whose segment is sampled first; may be null
 */
public List<DiskStorageFactory.DiskSubstitute> getRandomSample(ElementSubstituteFilter factory, int sampleSize, Object keyHint) {
    ArrayList<DiskStorageFactory.DiskSubstitute> sampled = new ArrayList<DiskStorageFactory.DiskSubstitute>(sampleSize);

    // Random starting offset inside each segment as well as the segment ring below.
    int randomHash = rndm.nextInt();

    final int segmentStart;
    if (keyHint == null) {
        segmentStart = (randomHash >>> segmentShift);
    } else {
        segmentStart = (hash(keyHint.hashCode()) >>> segmentShift);
    }

    int segmentIndex = segmentStart;
    do {
        segments[segmentIndex].addRandomSample(factory, sampleSize, sampled, randomHash);
        if (sampled.size() >= sampleSize) {
            break;
        }

        // Advance around the ring; relies on segments.length being a power of two.
        segmentIndex = (segmentIndex + 1) & (segments.length - 1);
    } while (segmentIndex != segmentStart);

    return sampled;
}
885
886 private static int hash(int hash) {
887 int spread = hash;
888 spread += (spread << FIFTEEN ^ FFFFCD7D);
889 spread ^= spread >>> TEN;
890 spread += (spread << THREE);
891 spread ^= spread >>> SIX;
892 spread += (spread << 2) + (spread << FOURTEEN);
893 return (spread ^ spread >>> SIXTEEN);
894 }
895
/**
 * Maps a spread hash to its owning segment via the top bits (hash >>> segmentShift).
 */
private Segment segmentFor(int hash) {
    return segments[hash >>> segmentShift];
}
899
900
903 final class KeySet extends AbstractSet<Object> {
904
905
908 @Override
909 public Iterator<Object> iterator() {
910 return new KeyIterator();
911 }
912
913
916 @Override
917 public int size() {
918 return DiskStore.this.getSize();
919 }
920
921
924 @Override
925 public boolean contains(Object o) {
926 return DiskStore.this.containsKey(o);
927 }
928
929
932 @Override
933 public boolean remove(Object o) {
934 return DiskStore.this.remove(o) != null;
935 }
936
937
940 @Override
941 public void clear() {
942 DiskStore.this.removeAll();
943 }
944
945
948 @Override
949 public Object[] toArray() {
950 Collection<Object> c = new ArrayList<Object>();
951 for (Object object : this) {
952 c.add(object);
953 }
954 return c.toArray();
955 }
956
957
960 @Override
961 public <T> T[] toArray(T[] a) {
962 Collection<Object> c = new ArrayList<Object>();
963 for (Object object : this) {
964 c.add(object);
965 }
966 return c.toArray(a);
967 }
968 }
969
970
/**
 * Exposes the per-segment read/write locks as key-based {@link Sync}s.
 */
private class LockProvider implements CacheLockProvider {

    /** Returns a Sync wrapping the lock of the segment that owns the key (null maps to hash 0). */
    public Sync getSyncForKey(Object key) {
        int hash = key == null ? 0 : hash(key.hashCode());
        return new ReadWriteLockSync(segmentFor(hash));
    }
}
983
984
/**
 * Base iterator over all segments' hash entries, walking the segment array from the
 * highest index down and chaining each segment's own iterator.
 * <p>
 * NOTE(review): hasNext() advances {@code currentIterator} to the next non-empty
 * segment; a remove() issued after such an advance targets the new iterator rather
 * than the one that produced the last entry — confirm no caller relies on
 * next()/hasNext()/remove() in that order.
 */
abstract class HashIterator {
    private int segmentIndex;
    private Iterator<HashEntry> currentIterator;

    /** Positions on the first (highest-indexed) segment that has entries. */
    HashIterator() {
        segmentIndex = segments.length;

        while (segmentIndex > 0) {
            segmentIndex--;
            currentIterator = segments[segmentIndex].hashIterator();
            if (currentIterator.hasNext()) {
                return;
            }
        }
    }

    /** True when the current or any lower-indexed segment still has entries. */
    public boolean hasNext() {
        if (this.currentIterator == null) {
            return false;
        }

        if (this.currentIterator.hasNext()) {
            return true;
        } else {
            // Advance to the next non-empty segment, if any.
            while (segmentIndex > 0) {
                segmentIndex--;
                currentIterator = segments[segmentIndex].hashIterator();
                if (currentIterator.hasNext()) {
                    return true;
                }
            }
        }
        return false;
    }

    /** Returns the next entry, or null when the iteration is exhausted. */
    protected HashEntry nextEntry() {
        if (currentIterator == null) {
            return null;
        }

        if (currentIterator.hasNext()) {
            return currentIterator.next();
        } else {
            while (segmentIndex > 0) {
                segmentIndex--;
                currentIterator = segments[segmentIndex].hashIterator();
                if (currentIterator.hasNext()) {
                    return currentIterator.next();
                }
            }
        }
        return null;
    }

    /** Removes the last entry returned by the current segment iterator. */
    public void remove() {
        currentIterator.remove();
    }

    /** Index of the segment the iteration is currently positioned on. */
    int getCurrentSegmentIndex() {
        return segmentIndex;
    }
}
1068
1069
1072 private final class KeyIterator extends HashIterator implements Iterator<Object> {
1073
1076 public Object next() {
1077 return super.nextEntry().key;
1078 }
1079 }
1080
1081
1084 private static final class ReadWriteLockSync implements Sync {
1085
1086 private final ReentrantReadWriteLock lock;
1087
1088 private ReadWriteLockSync(ReentrantReadWriteLock lock) {
1089 this.lock = lock;
1090 }
1091
1092
1095 public void lock(LockType type) {
1096 switch (type) {
1097 case READ:
1098 lock.readLock().lock();
1099 break;
1100 case WRITE:
1101 lock.writeLock().lock();
1102 break;
1103 default:
1104 throw new IllegalArgumentException("We don't support any other lock type than READ or WRITE!");
1105 }
1106 }
1107
1108
1111 public boolean tryLock(LockType type, long msec) throws InterruptedException {
1112 switch (type) {
1113 case READ:
1114 return lock.readLock().tryLock(msec, TimeUnit.MILLISECONDS);
1115 case WRITE:
1116 return lock.writeLock().tryLock(msec, TimeUnit.MILLISECONDS);
1117 default:
1118 throw new IllegalArgumentException("We don't support any other lock type than READ or WRITE!");
1119 }
1120 }
1121
1122
1125 public void unlock(LockType type) {
1126 switch (type) {
1127 case READ:
1128 lock.readLock().unlock();
1129 break;
1130 case WRITE:
1131 lock.writeLock().unlock();
1132 break;
1133 default:
1134 throw new IllegalArgumentException("We don't support any other lock type than READ or WRITE!");
1135 }
1136 }
1137
1138
1141 public boolean isHeldByCurrentThread(LockType type) {
1142 switch (type) {
1143 case READ:
1144 throw new UnsupportedOperationException("Querying of read lock is not supported.");
1145 case WRITE:
1146 return lock.isWriteLockedByCurrentThread();
1147 default:
1148 throw new IllegalArgumentException("We don't support any other lock type than READ or WRITE!");
1149 }
1150 }
1151
1152 }
1153
1154
/**
 * Striped lock with one ReadWriteLockSync per segment; keys are mapped to stripes
 * with the same hash/shift scheme the store uses for segment selection.
 */
private final class DiskStoreStripedReadWriteLock implements StripedReadWriteLock {

    private final net.sf.ehcache.concurrent.ReadWriteLockSync[] locks =
            new net.sf.ehcache.concurrent.ReadWriteLockSync[DEFAULT_SEGMENT_COUNT];

    private DiskStoreStripedReadWriteLock() {
        for (int i = 0; i < locks.length; i++) {
            locks[i] = new net.sf.ehcache.concurrent.ReadWriteLockSync();
        }
    }

    /** Returns the raw ReadWriteLock guarding the key's stripe. */
    public ReadWriteLock getLockForKey(final Object key) {
        return getSyncForKey(key).getReadWriteLock();
    }

    /** Returns all stripe syncs (fresh list; mutating it does not affect the stripes). */
    public List<net.sf.ehcache.concurrent.ReadWriteLockSync> getAllSyncs() {
        ArrayList<net.sf.ehcache.concurrent.ReadWriteLockSync> syncs =
                new ArrayList<net.sf.ehcache.concurrent.ReadWriteLockSync>(locks.length);
        Collections.addAll(syncs, locks);
        return syncs;
    }

    /** Returns the sync guarding the key's stripe. */
    public net.sf.ehcache.concurrent.ReadWriteLockSync getSyncForKey(final Object key) {
        return locks[indexFor(key)];
    }

    // Same mapping as segmentFor(): spread hash, then take the top bits.
    private int indexFor(final Object key) {
        return hash(key.hashCode()) >>> segmentShift;
    }
}
1196 }
1197