1 /**
2 * Copyright Terracotta, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 /**
18 *
19 */
20 package net.sf.ehcache.store.disk;
21
22 import java.io.Serializable;
23 import java.util.Collection;
24 import java.util.Iterator;
25 import java.util.NoSuchElementException;
26 import java.util.concurrent.TimeUnit;
27 import java.util.concurrent.locks.ReentrantReadWriteLock;
28
29 import net.sf.ehcache.Element;
30 import net.sf.ehcache.config.CacheConfiguration;
31 import net.sf.ehcache.config.PinningConfiguration;
32 import net.sf.ehcache.event.RegisteredEventListeners;
33 import net.sf.ehcache.pool.PoolAccessor;
34 import net.sf.ehcache.store.ElementValueComparator;
35 import net.sf.ehcache.store.disk.DiskStorageFactory.DiskMarker;
36 import net.sf.ehcache.store.disk.DiskStorageFactory.DiskSubstitute;
37 import net.sf.ehcache.store.disk.DiskStorageFactory.Placeholder;
38 import net.sf.ehcache.util.FindBugsSuppressWarnings;
39 import net.sf.ehcache.util.ratestatistics.AtomicRateStatistic;
40 import net.sf.ehcache.util.ratestatistics.RateStatistic;
41
42 import org.slf4j.Logger;
43 import org.slf4j.LoggerFactory;
44
45 /**
46 * Segment implementation used in LocalStore.
47 * <p>
48 * The segment extends ReentrantReadWriteLock to allow read locking on read operations.
49 * In addition to the typical CHM-like methods, this classes additionally supports
50 * replacement under a read lock - which is accomplished using an atomic CAS on the
51 * associated HashEntry.
52 *
53 * @author Chris Dennis
54 * @author Ludovic Orban
55 */
56 public class Segment extends ReentrantReadWriteLock {
57
58 private static final Logger LOG = LoggerFactory.getLogger(Segment.class.getName());
59 private static final HashEntry NULL_HASH_ENTRY = new HashEntry(null, 0, null, null);
60
61 private static final float LOAD_FACTOR = 0.75f;
62 private static final int MAXIMUM_CAPACITY = Integer.highestOneBit(Integer.MAX_VALUE);
63
64 /**
65 * Count of elements in the map.
66 * <p>
67 * A volatile reference is needed here for the same reasons as in the table reference.
68 */
69 protected volatile int count;
70
71 /**
72 * Mod-count used to track concurrent modifications when doing size calculations or iterating over the store.
73 * <p>
74 * Note that we don't actually have any iterators yet...
75 */
76 protected int modCount;
77
78 /**
79 * The primary substitute factory.
80 * <p>
81 * This is the substitute type used to store <code>Element</code>s when they are first added to the store.
82 */
83 private final DiskStorageFactory disk;
84
85 /**
86 * Table of HashEntry linked lists, indexed by the least-significant bits of the spread-hash value.
87 * <p>
88 * A volatile reference is needed to ensure the visibility of table changes made during rehash operations to size operations.
89 * Key operations are done under read-locks so there is no need for volatility in that regard. Hence if we switched to read-locked
90 * size operations, we wouldn't need a volatile reference here.
91 */
92 private volatile HashEntry[] table;
93
94 /**
95 * Size at which the next rehashing of this Segment should occur
96 */
97 private int threshold;
98
99 private final RateStatistic diskHitRate = new AtomicRateStatistic(1000, TimeUnit.MILLISECONDS);
100 private final RateStatistic diskMissRate = new AtomicRateStatistic(1000, TimeUnit.MILLISECONDS);
101 private final PoolAccessor onHeapPoolAccessor;
102 private final PoolAccessor onDiskPoolAccessor;
103 private final RegisteredEventListeners cacheEventNotificationService;
104 private volatile boolean cachePinned;
105
106 /**
107 * Create a Segment with the given initial capacity, load-factor, primary element substitute factory, and identity element substitute factory.
108 * <p>
109 * An identity element substitute factory is specified at construction time because only one subclass of IdentityElementProxyFactory
110 * can be used with a Segment. Without this requirement the mapping between bare {@link net.sf.ehcache.Element} instances and the factory
111 * responsible for them would be ambiguous.
112 * <p>
113 * If a <code>null</code> identity element substitute factory is specified then encountering a raw element (i.e. as a result of using an
114 * identity element substitute factory) will result in a null pointer exception during decode.
115 *
116 * @param initialCapacity initial capacity of store
117 * @param loadFactor fraction of capacity at which rehash occurs
118 * @param primary primary element substitute factory
119 * @param cacheConfiguration the cache configuration
120 * @param onHeapPoolAccessor the pool tracking on-heap usage
121 * @param onDiskPoolAccessor the pool tracking on-disk usage
122 * @param cacheEventNotificationService
123 */
124 public Segment(int initialCapacity, float loadFactor, DiskStorageFactory primary,
125 CacheConfiguration cacheConfiguration,
126 PoolAccessor onHeapPoolAccessor, PoolAccessor onDiskPoolAccessor,
127 RegisteredEventListeners cacheEventNotificationService) {
128 this.onHeapPoolAccessor = onHeapPoolAccessor;
129 this.onDiskPoolAccessor = onDiskPoolAccessor;
130 this.cacheEventNotificationService = cacheEventNotificationService;
131 this.table = new HashEntry[initialCapacity];
132 this.threshold = (int) (table.length * loadFactor);
133 this.modCount = 0;
134 this.disk = primary;
135 this.cachePinned = determineCachePinned(cacheConfiguration);
136 }
137
138 private static boolean determineCachePinned(CacheConfiguration cacheConfiguration) {
139 PinningConfiguration pinningConfiguration = cacheConfiguration.getPinningConfiguration();
140 if (pinningConfiguration == null) {
141 return false;
142 }
143
144 switch (pinningConfiguration.getStore()) {
145 case LOCALHEAP:
146 return false;
147
148 case LOCALMEMORY:
149 return false;
150
151 case INCACHE:
152 return cacheConfiguration.isOverflowToDisk();
153
154 default:
155 throw new IllegalArgumentException();
156 }
157 }
158
159 private HashEntry getFirst(int hash) {
160 HashEntry[] tab = table;
161 return tab[hash & (tab.length - 1)];
162 }
163
164 /**
165 * Decode the possible DiskSubstitute
166 *
167 * @param object the DiskSubstitute to decode
168 * @return the decoded DiskSubstitute
169 */
170 private Element decode(Object object) {
171 DiskStorageFactory.DiskSubstitute substitute = (DiskStorageFactory.DiskSubstitute) object;
172 return substitute.getFactory().retrieve(substitute);
173 }
174
175 /**
176 * Decode the possible DiskSubstitute, updating the statistics
177 *
178 * @param object the DiskSubstitute to decode
179 * @return the decoded DiskSubstitute
180 */
181 private Element decodeHit(Object object) {
182 DiskStorageFactory.DiskSubstitute substitute = (DiskStorageFactory.DiskSubstitute) object;
183 return substitute.getFactory().retrieve(substitute, this);
184 }
185
186 /**
187 * Free the DiskSubstitute
188 *
189 * @param object the DiskSubstitute to free
190 */
191 private void free(Object object) {
192 free(object, false);
193 }
194
195 /**
196 * Free the DiskSubstitute indicating if it could not be faulted
197 *
198 * @param object the DiskSubstitute to free
199 * @param faultFailure true if the DiskSubstitute should be freed because of a fault failure
200 */
201 private void free(Object object, boolean faultFailure) {
202 DiskStorageFactory.DiskSubstitute diskSubstitute = (DiskStorageFactory.DiskSubstitute) object;
203 diskSubstitute.getFactory().free(writeLock(), diskSubstitute, faultFailure);
204 }
205
206 /**
207 * Get the element mapped to this key (or null if there is no mapping for this key)
208 *
209 * @param key key to lookup
210 * @param hash spread-hash for this key
211 * @return mapped element
212 */
213 Element get(Object key, int hash) {
214 readLock().lock();
215 try {
216 // read-volatile
217 if (count != 0) {
218 HashEntry e = getFirst(hash);
219 while (e != null) {
220 if (e.hash == hash && key.equals(e.key)) {
221 return decodeHit(e.element);
222 }
223 e = e.next;
224 }
225 }
226 miss();
227 return null;
228 } finally {
229 readLock().unlock();
230 }
231 }
232
233 /**
234 * Return the unretrieved (undecoded) value for this key
235 *
236 * @param key key to lookup
237 * @param hash spread-hash for the key
238 * @return Element or ElementSubstitute
239 */
240 Object unretrievedGet(Object key, int hash) {
241 readLock().lock();
242 try {
243 if (count != 0) {
244 HashEntry e = getFirst(hash);
245 while (e != null) {
246 if (e.hash == hash && key.equals(e.key)) {
247 return e.element;
248 }
249 e = e.next;
250 }
251 }
252 } finally {
253 readLock().unlock();
254 }
255 return null;
256 }
257
258 /**
259 * Return true if this segment contains a mapping for this key
260 *
261 * @param key key to check for
262 * @param hash spread-hash for key
263 * @return <code>true</code> if there is a mapping for this key
264 */
265 boolean containsKey(Object key, int hash) {
266 readLock().lock();
267 try {
268 // read-volatile
269 if (count != 0) {
270 HashEntry e = getFirst(hash);
271 while (e != null) {
272 if (e.hash == hash && key.equals(e.key)) {
273 return true;
274 }
275 e = e.next;
276 }
277 }
278 return false;
279 } finally {
280 readLock().unlock();
281 }
282 }
283
    /**
     * Replace the element mapped to this key only if currently mapped to the given element.
     *
     * @param key key to map the element to
     * @param hash spread-hash for the key
     * @param oldElement expected element
     * @param newElement element to add
     * @param comparator the comparator to use to compare values
     * @return <code>true</code> on a successful replace
     */
    boolean replace(Object key, int hash, Element oldElement, Element newElement, ElementValueComparator comparator) {
        boolean installed = false;
        // the substitute is created speculatively, before we know whether the replace will happen
        DiskStorageFactory.DiskSubstitute encoded = disk.create(newElement);

        writeLock().lock();
        try {
            // locate the entry for this key (if any)
            HashEntry e = getFirst(hash);
            while (e != null && (e.hash != hash || !key.equals(e.key))) {
                e = e.next;
            }

            boolean replaced = false;
            if (e != null && comparator.equals(oldElement, decode(e.element))) {
                replaced = true;
                /*
                 * make sure we re-get from the HashEntry - since the decode in the conditional
                 * may have faulted in a different type - we must make sure we know what type
                 * to do the increment/decrement on.
                 */
                DiskSubstitute onDiskSubstitute = e.element;

                final long deltaHeapSize = onHeapPoolAccessor.replace(onDiskSubstitute.onHeapSize, key, encoded, NULL_HASH_ENTRY, cachePinned);
                if (deltaHeapSize == Long.MAX_VALUE) {
                    // Long.MAX_VALUE signals that the heap pool rejected the replacement
                    LOG.debug("replace3 failed to add on heap");
                    free(encoded);
                    return false;
                } else {
                    LOG.debug("replace3 added {} on heap", deltaHeapSize);
                    encoded.onHeapSize = onDiskSubstitute.onHeapSize + deltaHeapSize;
                }

                e.element = encoded;
                installed = true;
                free(onDiskSubstitute);

                if (onDiskSubstitute instanceof DiskStorageFactory.DiskMarker) {
                    // the old value had reached disk - release its disk-pool accounting too
                    final long outgoingDiskSize = onDiskPoolAccessor.delete(((DiskStorageFactory.DiskMarker) onDiskSubstitute).getSize());
                    LOG.debug("replace3 removed {} from disk", outgoingDiskSize);
                }
            } else {
                // no match - discard the speculatively created substitute
                free(encoded);
            }
            return replaced;
        } finally {
            writeLock().unlock();

            // notify the factory outside the lock, once the substitute is truly installed
            if (installed) {
                encoded.installed();
            }
        }
    }
346
    /**
     * Replace the entry for this key only if currently mapped to some element.
     *
     * @param key key to map the element to
     * @param hash spread-hash for the key
     * @param newElement element to add
     * @return previous element mapped to this key, or <code>null</code> if there was no
     *         mapping or the heap pool rejected the update
     */
    Element replace(Object key, int hash, Element newElement) {
        boolean installed = false;
        // the substitute is created speculatively, before we know whether a mapping exists
        DiskStorageFactory.DiskSubstitute encoded = disk.create(newElement);

        writeLock().lock();
        try {
            // locate the entry for this key (if any)
            HashEntry e = getFirst(hash);
            while (e != null && (e.hash != hash || !key.equals(e.key))) {
                e = e.next;
            }

            Element oldElement = null;
            if (e != null) {
                DiskSubstitute onDiskSubstitute = e.element;

                final long deltaHeapSize = onHeapPoolAccessor.replace(onDiskSubstitute.onHeapSize, key, encoded, NULL_HASH_ENTRY, cachePinned);
                if (deltaHeapSize == Long.MAX_VALUE) {
                    // Long.MAX_VALUE signals that the heap pool rejected the replacement
                    LOG.debug("replace2 failed to add on heap");
                    free(encoded);
                    return null;
                } else {
                    LOG.debug("replace2 added {} on heap", deltaHeapSize);
                    encoded.onHeapSize = onDiskSubstitute.onHeapSize + deltaHeapSize;
                }

                e.element = encoded;
                installed = true;
                oldElement = decode(onDiskSubstitute);
                free(onDiskSubstitute);

                if (onDiskSubstitute instanceof DiskStorageFactory.DiskMarker) {
                    // the old value had reached disk - release its disk-pool accounting too
                    final long outgoingDiskSize = onDiskPoolAccessor.delete(((DiskStorageFactory.DiskMarker) onDiskSubstitute).getSize());
                    LOG.debug("replace2 removed {} from disk", outgoingDiskSize);
                }
            } else {
                // no existing mapping - discard the speculatively created substitute
                free(encoded);
            }

            return oldElement;
        } finally {
            writeLock().unlock();

            // notify the factory outside the lock, once the substitute is truly installed
            if (installed) {
                encoded.installed();
            }
        }
    }
402
    /**
     * Add the supplied mapping.
     * <p>
     * The supplied element is substituted using the primary element proxy factory
     * before being stored in the cache. If <code>onlyIfAbsent</code> is set
     * then the mapping will only be added if no element is currently mapped
     * to that key.
     *
     * @param key key to map the element to
     * @param hash spread-hash for the key
     * @param element element to store
     * @param onlyIfAbsent if true does not replace existing mappings
     * @return previous element mapped to this key, or <code>null</code> if there was none
     *         (also returned when the heap pool rejects the new element)
     */
    Element put(Object key, int hash, Element element, boolean onlyIfAbsent) {
        boolean installed = false;
        DiskSubstitute encoded = disk.create(element);
        // reserve heap space for the substitute before taking the write lock
        final long incomingHeapSize = onHeapPoolAccessor.add(key, encoded, NULL_HASH_ENTRY, cachePinned);
        if (incomingHeapSize < 0) {
            // negative size means the heap pool could not make room - the put is abandoned
            LOG.debug("put failed to add on heap");
            return null;
        } else {
            LOG.debug("put added {} on heap", incomingHeapSize);
            encoded.onHeapSize = incomingHeapSize;
        }

        writeLock().lock();
        try {
            // ensure capacity
            if (count + 1 > threshold) {
                rehash();
            }
            HashEntry[] tab = table;
            int index = hash & (tab.length - 1);
            HashEntry first = tab[index];
            HashEntry e = first;
            while (e != null && (e.hash != hash || !key.equals(e.key))) {
                e = e.next;
            }

            Element oldElement;
            if (e != null) {
                DiskSubstitute onDiskSubstitute = e.element;
                if (!onlyIfAbsent) {
                    e.element = encoded;
                    installed = true;
                    oldElement = decode(onDiskSubstitute);

                    free(onDiskSubstitute);
                    // release the pool accounting held by the replaced substitute
                    final long existingHeapSize = onHeapPoolAccessor.delete(onDiskSubstitute.onHeapSize);
                    LOG.debug("put updated, deleted {} on heap", existingHeapSize);

                    if (onDiskSubstitute instanceof DiskStorageFactory.DiskMarker) {
                        final long existingDiskSize = onDiskPoolAccessor.delete(((DiskStorageFactory.DiskMarker) onDiskSubstitute).getSize());
                        LOG.debug("put updated, deleted {} on disk", existingDiskSize);
                    }
                } else {
                    // putIfAbsent with an existing mapping: keep the old value, roll back the reservation
                    oldElement = decode(onDiskSubstitute);

                    free(encoded);
                    final long outgoingHeapSize = onHeapPoolAccessor.delete(encoded.onHeapSize);
                    LOG.debug("put if absent failed, deleted {} on heap", outgoingHeapSize);
                }
            } else {
                // no existing mapping - link a new entry at the head of the chain
                oldElement = null;
                ++modCount;
                tab[index] = new HashEntry(key, hash, first, encoded);
                installed = true;
                // write-volatile
                count = count + 1;
            }
            return oldElement;

        } finally {
            writeLock().unlock();

            // notify the factory outside the lock, once the substitute is truly installed
            if (installed) {
                encoded.installed();
            }
        }
    }
484
485
    /**
     * Add the supplied pre-encoded mapping.
     * <p>
     * The supplied encoded element is directly inserted into the segment
     * if there is no other mapping for this key.
     *
     * @param key key to map the element to
     * @param hash spread-hash for the key
     * @param encoded encoded element to store
     * @return <code>true</code> if the encoded element was installed,
     *         <code>false</code> if either pool refused the reservation
     * @throws IllegalArgumentException if the supplied key is already present
     */
    boolean putRawIfAbsent(Object key, int hash, DiskMarker encoded) throws IllegalArgumentException {
        writeLock().lock();
        try {
            // refuse up-front if installing would force a disk eviction
            if (!onDiskPoolAccessor.canAddWithoutEvicting(key, null, encoded)) {
                return false;
            }
            final long incomingHeapSize = onHeapPoolAccessor.add(key, encoded, NULL_HASH_ENTRY, cachePinned);
            if (incomingHeapSize < 0) {
                return false;
            } else {
                encoded.onHeapSize = incomingHeapSize;
            }
            if (onDiskPoolAccessor.add(key, null, encoded, cachePinned) < 0) {
                // disk reservation failed - roll back the heap reservation
                onHeapPoolAccessor.delete(encoded.onHeapSize);
                return false;
            }

            // ensure capacity
            if (count + 1 > threshold) {
                rehash();
            }
            HashEntry[] tab = table;
            int index = hash & (tab.length - 1);
            HashEntry first = tab[index];
            HashEntry e = first;
            while (e != null && (e.hash != hash || !key.equals(e.key))) {
                e = e.next;
            }

            if (e == null) {
                ++modCount;
                tab[index] = new HashEntry(key, hash, first, encoded);
                // write-volatile
                count = count + 1;
                return true;
            } else {
                // key already present - undo both reservations before failing loudly
                onHeapPoolAccessor.delete(encoded.onHeapSize);
                onDiskPoolAccessor.delete(encoded.getSize());
                throw new IllegalArgumentException("Duplicate key detected");
            }
        } finally {
            writeLock().unlock();
        }
    }
542
    /**
     * Double the table capacity and redistribute the existing entries.
     * <p>
     * Invoked from put/putRawIfAbsent while the segment write lock is held.
     * Growth stops once the table reaches MAXIMUM_CAPACITY.
     */
    private void rehash() {
        HashEntry[] oldTable = table;
        int oldCapacity = oldTable.length;
        if (oldCapacity >= MAXIMUM_CAPACITY) {
            return;
        }

        /*
         * Reclassify nodes in each list to new Map. Because we are
         * using power-of-two expansion, the elements from each bin
         * must either stay at same index, or move with a power of two
         * offset. We eliminate unnecessary node creation by catching
         * cases where old nodes can be reused because their next
         * fields won't change. Statistically, at the default
         * threshold, only about one-sixth of them need cloning when
         * a table doubles. The nodes they replace will be garbage
         * collectable as soon as they are no longer referenced by any
         * reader thread that may be in the midst of traversing table
         * right now.
         */

        HashEntry[] newTable = new HashEntry[oldCapacity << 1];
        threshold = (int)(newTable.length * LOAD_FACTOR);
        int sizeMask = newTable.length - 1;
        for (int i = 0; i < oldCapacity; i++) {
            // We need to guarantee that any existing reads of old Map can
            // proceed. So we cannot yet null out each bin.
            HashEntry e = oldTable[i];

            if (e != null) {
                HashEntry next = e.next;
                int idx = e.hash & sizeMask;

                // Single node on list
                if (next == null) {
                    newTable[idx] = e;
                } else {
                    // Reuse trailing consecutive sequence at same slot
                    HashEntry lastRun = e;
                    int lastIdx = idx;
                    for (HashEntry last = next;
                         last != null;
                         last = last.next) {
                        int k = last.hash & sizeMask;
                        if (k != lastIdx) {
                            lastIdx = k;
                            lastRun = last;
                        }
                    }
                    newTable[lastIdx] = lastRun;

                    // Clone all remaining nodes
                    for (HashEntry p = e; p != lastRun; p = p.next) {
                        int k = p.hash & sizeMask;
                        HashEntry n = newTable[k];
                        newTable[k] = new HashEntry(p.key, p.hash, n, p.element);
                    }
                }
            }
        }
        // publish the new table (volatile write) only after it is fully populated
        table = newTable;
    }
605
    /**
     * Remove the matching mapping.
     * <p>
     * If <code>value</code> is <code>null</code> then match on the key only,
     * else match on both the key and the value.
     *
     * @param key key to match against
     * @param hash spread-hash for the key
     * @param value optional value to match against
     * @param comparator the comparator to use to compare values (only dereferenced when
     *        <code>value</code> is non-null)
     * @return removed element, or <code>null</code> if nothing was removed
     */
    Element remove(Object key, int hash, Element value, ElementValueComparator comparator) {
        writeLock().lock();
        try {
            HashEntry[] tab = table;
            int index = hash & (tab.length - 1);
            HashEntry first = tab[index];
            HashEntry e = first;
            while (e != null && (e.hash != hash || !key.equals(e.key))) {
                e = e.next;
            }

            Element oldValue = null;
            if (e != null) {
                oldValue = decode(e.element);
                if (value == null || comparator.equals(value, oldValue)) {
                    // All entries following removed node can stay
                    // in list, but all preceding ones need to be
                    // cloned.
                    ++modCount;
                    HashEntry newFirst = e.next;
                    for (HashEntry p = first; p != e; p = p.next) {
                        newFirst = new HashEntry(p.key, p.hash, newFirst, p.element);
                    }
                    tab[index] = newFirst;
                    /*
                     * make sure we re-get from the HashEntry - since the decode in the conditional
                     * may have faulted in a different type - we must make sure we know what type
                     * to do the free on.
                     */
                    DiskSubstitute onDiskSubstitute = e.element;
                    free(onDiskSubstitute);

                    // release the pool accounting held by the removed substitute
                    final long outgoingHeapSize = onHeapPoolAccessor.delete(onDiskSubstitute.onHeapSize);
                    LOG.debug("remove deleted {} from heap", outgoingHeapSize);

                    if (onDiskSubstitute instanceof DiskStorageFactory.DiskMarker) {
                        final long outgoingDiskSize = onDiskPoolAccessor.delete(((DiskStorageFactory.DiskMarker) onDiskSubstitute).getSize());
                        LOG.debug("remove deleted {} from disk", outgoingDiskSize);
                    }

                    // write-volatile
                    count = count - 1;
                } else {
                    // a value was supplied but did not match - leave the mapping in place
                    oldValue = null;
                }
            }

            if (oldValue == null) {
                LOG.debug("remove deleted nothing");
            }

            return oldValue;
        } finally {
            writeLock().unlock();
        }
    }
675
    /**
     * Remove the matching mapping, without decoding or returning the removed element.
     *
     * @param key key to match against
     * @param hash spread-hash for the key
     */
    void removeNoReturn(Object key, int hash) {
        writeLock().lock();
        try {
            HashEntry[] tab = table;
            int index = hash & (tab.length - 1);
            HashEntry first = tab[index];
            HashEntry e = first;
            while (e != null && (e.hash != hash || !key.equals(e.key))) {
                e = e.next;
            }

            if (e != null) {
                // All entries following removed node can stay
                // in list, but all preceding ones need to be
                // cloned.
                ++modCount;
                HashEntry newFirst = e.next;
                for (HashEntry p = first; p != e; p = p.next) {
                    newFirst = new HashEntry(p.key, p.hash, newFirst, p.element);
                }
                tab[index] = newFirst;
                /*
                 * make sure we re-get from the HashEntry - since the decode in the conditional
                 * may have faulted in a different type - we must make sure we know what type
                 * to do the free on.
                 */
                DiskSubstitute onDiskSubstitute = e.element;
                free(onDiskSubstitute);

                // release the pool accounting held by the removed substitute
                final long outgoingHeapSize = onHeapPoolAccessor.delete(onDiskSubstitute.onHeapSize);
                LOG.debug("remove deleted {} from heap", outgoingHeapSize);

                if (onDiskSubstitute instanceof DiskStorageFactory.DiskMarker) {
                    final long outgoingDiskSize = onDiskPoolAccessor.delete(((DiskStorageFactory.DiskMarker) onDiskSubstitute).getSize());
                    LOG.debug("remove deleted {} from disk", outgoingDiskSize);
                }

                // write-volatile
                count = count - 1;
            }
        } finally {
            writeLock().unlock();
        }
    }
726
    /**
     * Removes all mappings from this segment.
     * <p>
     * Frees every substitute, unlinks every bin, and resets both pool accessors.
     */
    void clear() {
        writeLock().lock();
        try {
            if (count != 0) {
                HashEntry[] tab = table;
                for (int i = 0; i < tab.length; i++) {
                    // free every substitute in the chain before unlinking the bin
                    for (HashEntry e = tab[i]; e != null; e = e.next) {
                        free(e.element);
                    }
                    tab[i] = null;
                }
                ++modCount;
                // write-volatile
                count = 0;
            }
            onHeapPoolAccessor.clear();
            LOG.debug("cleared heap usage");
            onDiskPoolAccessor.clear();
            LOG.debug("cleared disk usage");
        } finally {
            writeLock().unlock();
        }
    }
753
    /**
     * Try to atomically switch (CAS) the <code>expect</code> representation of this element for the
     * <code>fault</code> representation.
     * <p>
     * A successful switch will return <code>true</code>, and free the replaced element/element-proxy.
     * A failed switch will return <code>false</code> and free the element/element-proxy which was not
     * installed. Unlike <code>fault</code> this method can return <code>false</code> if the object
     * could not be installed due to lock contention.
     *
     * @param key key to which this element (proxy) is mapped
     * @param hash the hash of the key
     * @param expect element (proxy) expected
     * @param fault element (proxy) to install
     * @return <code>true</code> if <code>fault</code> was installed
     */
    boolean fault(Object key, int hash, Placeholder expect, DiskMarker fault) {
        writeLock().lock();
        try {
            if (count != 0) {
                // move the heap accounting from the placeholder to the disk marker
                final long deltaHeapSize = onHeapPoolAccessor.replace(expect.onHeapSize, key, fault, NULL_HASH_ENTRY, cachePinned);
                if (deltaHeapSize == Long.MAX_VALUE) {
                    // heap pool rejected the swap - drop the mapping entirely
                    remove(key, hash, null, null);
                    return false;
                } else {
                    fault.onHeapSize = expect.onHeapSize + deltaHeapSize;
                    LOG.debug("fault removed {} from heap", deltaHeapSize);
                }
                final long incomingDiskSize = onDiskPoolAccessor.add(key, null, fault, cachePinned);
                if (incomingDiskSize < 0) {
                    //todo: replace must not fail here but it could if the memory freed by the previous replace has been stolen in the meantime
                    // that's why it is forced, even if that could make the pool go over limit
                    long deleteSize = onHeapPoolAccessor.replace(fault.onHeapSize, key, expect, NULL_HASH_ENTRY, true);
                    LOG.debug("fault failed to add on disk, deleted {} from heap", deleteSize);
                    expect.onHeapSize = fault.onHeapSize + deleteSize;
                    final Element element = get(key, hash);
                    // if the front-end tier allows it, evict the element and notify listeners
                    if (cacheEventNotificationService.getFrontEndCacheTier() == null
                        || cacheEventNotificationService.getFrontEndCacheTier().isEvictionCandidate(element)) {
                        notifyEviction(remove(key, hash, null, null));
                        return false;
                    }
                    return true;
                } else {
                    LOG.debug("fault added {} on disk", incomingDiskSize);
                }

                // the CAS proper: install only if the entry still references the expected placeholder
                for (HashEntry e = getFirst(hash); e != null; e = e.next) {
                    if (e.hash == hash && key.equals(e.key)) {
                        if (expect == e.element) {
                            e.element = fault;
                            free(expect);
                            return true;
                        }
                    }
                }

                //todo: replace must not fail here but it could if the memory freed by the previous replace has been stolen in the meantime
                // that's why it is forced, even if that could make the pool go over limit
                final long failDeltaHeapSize = onHeapPoolAccessor.replace(fault.onHeapSize, key, expect, NULL_HASH_ENTRY, true);
                LOG.debug("fault installation failed, deleted {} from heap", failDeltaHeapSize);
                expect.onHeapSize = fault.onHeapSize + failDeltaHeapSize;
                onDiskPoolAccessor.delete(incomingDiskSize);
                LOG.debug("fault installation failed deleted {} from disk", incomingDiskSize);
            }
            // either the segment was empty or the CAS failed - free the uninstalled marker
            free(fault, true);
            return false;
        } finally {
            writeLock().unlock();
        }
    }
823
824 private void notifyEviction(final Element evicted) {
825 if (evicted != null) {
826 cacheEventNotificationService.notifyElementEvicted(evicted, false);
827 }
828 }
829
    /**
     * Remove the matching mapping. Unlike the {@link net.sf.ehcache.store.disk.Segment#remove(Object, int, net.sf.ehcache.Element, net.sf.ehcache.store.ElementValueComparator)} method
     * evict does referential comparison of the unretrieved substitute against the argument value.
     * <p>
     * Eviction listeners are notified when something is evicted.
     *
     * @param key key to match against
     * @param hash spread-hash for the key
     * @param value optional value to match against
     * @return the evicted element, or <code>null</code> if nothing was evicted
     */
    Element evict(Object key, int hash, DiskSubstitute value) {
        return evict(key, hash, value, true);
    }
842
    /**
     * Remove the matching mapping. Unlike the {@link net.sf.ehcache.store.disk.Segment#remove(Object, int, net.sf.ehcache.Element, net.sf.ehcache.store.ElementValueComparator)} method
     * evict does referential comparison of the unretrieved substitute against the argument value.
     *
     * @param key key to match against
     * @param hash spread-hash for the key
     * @param value optional value to match against
     * @param notify whether to notify if we evict something
     * @return the evicted element, or <code>null</code> if nothing was evicted
     *         (including when the write lock could not be acquired immediately)
     */
    Element evict(Object key, int hash, DiskSubstitute value, boolean notify) {
        // only a tryLock: eviction is opportunistic and yields to contending writers
        if (writeLock().tryLock()) {
            Element evictedElement = null;
            try {
                HashEntry[] tab = table;
                int index = hash & (tab.length - 1);
                HashEntry first = tab[index];
                HashEntry e = first;
                while (e != null && (e.hash != hash || !key.equals(e.key))) {
                    e = e.next;
                }

                if (e != null) {
                    evictedElement = decode(e.element);
                }

                // evict only on a referential substitute match (when supplied) and when
                // the front-end tier (if any) agrees the element is an eviction candidate
                if (e != null && (value == null || value == e.element)
                    && (cacheEventNotificationService.getFrontEndCacheTier() == null
                        || cacheEventNotificationService.getFrontEndCacheTier().isEvictionCandidate(evictedElement))) {
                    // All entries following removed node can stay
                    // in list, but all preceding ones need to be
                    // cloned.
                    ++modCount;
                    HashEntry newFirst = e.next;
                    for (HashEntry p = first; p != e; p = p.next) {
                        newFirst = new HashEntry(p.key, p.hash, newFirst, p.element);
                    }
                    tab[index] = newFirst;
                    /*
                     * make sure we re-get from the HashEntry - since the decode in the conditional
                     * may have faulted in a different type - we must make sure we know what type
                     * to do the free on.
                     */
                    DiskSubstitute v = e.element;

                    free(v);

                    if (v instanceof DiskMarker) {
                        final long outgoingDiskSize = onDiskPoolAccessor.delete(((DiskMarker) v).getSize());
                        LOG.debug("evicted {} from disk", outgoingDiskSize);
                    }
                    final long outgoingHeapSize = onHeapPoolAccessor.delete(v.onHeapSize);
                    LOG.debug("evicted {} from heap", outgoingHeapSize);

                    // write-volatile
                    count = count - 1;
                } else {
                    // nothing evicted - clear the decoded element so no notification fires
                    evictedElement = null;
                }
                return evictedElement;
            } finally {
                writeLock().unlock();
                // notify outside the lock
                if (notify && evictedElement != null) {
                    cacheEventNotificationService.notifyElementEvicted(evictedElement, false);
                }
            }
        } else {
            return null;
        }
    }
913
914 /**
915 * Select a random sample of elements generated by the supplied factory.
916 *
917 * @param filter filter of substitute types
918 * @param sampleSize minimum number of elements to return
919 * @param sampled collection in which to place the elements
920 * @param seed random seed for the selection
921 */
922 void addRandomSample(ElementSubstituteFilter filter, int sampleSize, Collection<DiskStorageFactory.DiskSubstitute> sampled, int seed) {
923 final HashEntry[] tab = table;
924 final int tableStart = seed & (tab.length - 1);
925 int tableIndex = tableStart;
926 do {
927 for (HashEntry e = tab[tableIndex]; e != null; e = e.next) {
928 Object value = e.element;
929 if (filter.allows(value)) {
930 sampled.add((DiskStorageFactory.DiskSubstitute) value);
931 }
932 }
933
934 if (sampled.size() >= sampleSize) {
935 return;
936 }
937
938 //move to next table slot
939 tableIndex = (tableIndex + 1) & (tab.length - 1);
940 } while (tableIndex != tableStart);
941 }
942
    /**
     * Creates an iterator over the HashEntry objects within this Segment.
     * <p>
     * The returned iterator supports removal via {@link java.util.Iterator#remove()}.
     *
     * @return an iterator over the HashEntry objects within this Segment.
     */
    Iterator<HashEntry> hashIterator() {
        return new HashIterator();
    }
950
951 @Override
952 public String toString() {
953 return super.toString() + " count: " + count;
954 }
955
    /**
     * Will check whether a Placeholder that failed to flush to disk is lying around
     * If so, it'll try to evict it
     *
     * @param key the key
     * @param hash the key's hash
     * @return true if a failed marker was or is still there, false otherwise
     */
    @FindBugsSuppressWarnings("UL_UNRELEASED_LOCK")
    boolean cleanUpFailedMarker(final Serializable key, final int hash) {
        boolean readLocked = false;
        boolean failedMarker = false;
        // only take the read lock when this thread does not already hold the write lock;
        // the matching unlock in the finally block is guarded by the same flag
        if (!isWriteLockedByCurrentThread()) {
            readLock().lock();
            readLocked = true;
        }
        DiskSubstitute substitute = null;
        try {
            if (count != 0) {
                HashEntry e = getFirst(hash);
                while (e != null) {
                    if (e.hash == hash && key.equals(e.key)) {
                        substitute = e.element;
                        if (substitute instanceof Placeholder) {
                            failedMarker = ((Placeholder)substitute).hasFailedToFlush();
                            break;
                        }
                    }
                    e = e.next;
                }
            }
        } finally {
            if (readLocked) {
                readLock().unlock();
            }
        }
        // evict the failed placeholder (without listener notification) if one was found
        return failedMarker && evict(key, hash, substitute, false) != null;
    }
993
    /**
     * An iterator over the HashEntry objects within this Segment.
     * <p>
     * The table reference is captured once at construction and no lock is held while
     * iterating. Slots are scanned from the highest index down to zero.
     */
    final class HashIterator implements Iterator<HashEntry> {
        // index of the next table slot to scan (scan proceeds downwards)
        private int nextTableIndex;
        // table snapshot taken at construction; null when the segment was empty
        private final HashEntry[] ourTable;
        private HashEntry nextEntry;
        private HashEntry lastReturned;

        private HashIterator() {
            if (count != 0) {
                ourTable = table;
                // position on the highest-indexed non-empty bin
                for (int j = ourTable.length - 1; j >= 0; --j) {
                    nextEntry = ourTable[j];
                    if (nextEntry != null) {
                        nextTableIndex = j - 1;
                        return;
                    }
                }
            } else {
                // empty segment - nothing to iterate
                ourTable = null;
                nextTableIndex = -1;
            }
            advance();
        }

        // Advance nextEntry to the next node in the current chain, or to the head of
        // the next non-empty bin; leaves nextEntry null when iteration is exhausted.
        private void advance() {
            if (nextEntry != null) {
                nextEntry = nextEntry.next;
                if (nextEntry != null) {
                    return;
                }
            }

            while (nextTableIndex >= 0) {
                nextEntry = ourTable[nextTableIndex--];
                if (nextEntry != null) {
                    return;
                }
            }
        }

        /**
         * {@inheritDoc}
         */
        public boolean hasNext() {
            return nextEntry != null;
        }

        /**
         * {@inheritDoc}
         */
        public HashEntry next() {
            if (nextEntry == null) {
                throw new NoSuchElementException();
            }
            lastReturned = nextEntry;
            advance();
            return lastReturned;
        }

        /**
         * {@inheritDoc}
         */
        public void remove() {
            if (lastReturned == null) {
                throw new IllegalStateException();
            }
            // key-only removal from the owning segment
            Segment.this.remove(lastReturned.key, lastReturned.hash, null, null);
            lastReturned = null;
        }
    }
1066
1067 /**
1068 * Return the disk hit rate
1069 * @return the disk hit rate
1070 */
1071 public float getDiskHitRate() {
1072 return diskHitRate.getRate();
1073 }
1074
1075 /**
1076 * Return the disk miss rate
1077 * @return the disk miss rate
1078 */
1079 public float getDiskMissRate() {
1080 return diskMissRate.getRate();
1081 }
1082
1083 /**
1084 * Record a hit in the disk tier
1085 */
1086 protected void diskHit() {
1087 diskHitRate.event();
1088 }
1089
1090 /**
1091 * Record a miss in the disk tier
1092 */
1093 protected void miss() {
1094 diskMissRate.event();
1095 }
1096 }
1097