// ========================================================================
// Copyright 2004-2008 Mort Bay Consulting Pty. Ltd.
// ------------------------------------------------------------------------
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ========================================================================

package org.mortbay.terracotta.servlet;

import java.util.Collections;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Hashtable;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;

import javax.servlet.http.Cookie;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpSession;

import com.tc.object.bytecode.Manageable;
import com.tc.object.bytecode.Manager;
import com.tc.object.bytecode.ManagerUtil;
import org.mortbay.jetty.Request;
import org.mortbay.jetty.handler.ContextHandler;
import org.mortbay.jetty.servlet.AbstractSessionManager;
import org.mortbay.log.Log;

/**
 * A specialized SessionManager to be used with <a href="http://www.terracotta.org">Terracotta</a>.
 * <br />
 * <h3>IMPLEMENTATION NOTES</h3>
 * <h4>Requirements</h4>
 * This session manager implementation requires J2SE 5 or later.
 * <h4>Use of Hashtable</h4>
 * In Terracotta, collection classes are
 * <a href="http://www.terracotta.org/web/display/docs/Concept+and+Architecture+Guide">logically managed</a>,
 * and we need two levels of locking: local locking to handle concurrent requests on the same node,
 * and distributed locking to handle concurrent requests on different nodes.
 * Natively synchronized classes such as Hashtable fit better than synchronized wrappers obtained via, for
 * example, {@link Collections#synchronizedMap(Map)}. This is because Terracotta may replay the method call
 * on the inner unsynchronized collection without invoking the external wrapper, so the synchronization would
 * be lost. Natively synchronized collections do not have this problem.
 * <h4>Use of Hashtable as a Set</h4>
 * There is no natively synchronized Set implementation, so we use Hashtable instead, see
 * {@link TerracottaSessionIdManager}.
 * However, we don't map the session id to itself, because Strings are treated specially by Terracotta,
 * causing more traffic to the Terracotta server. Instead we use the same pattern used in the implementation
 * of <code>java.util.HashSet</code>: use a single shared object to indicate the presence of a key.
 * This is necessary since Hashtable does not allow null values.
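 * A minimal sketch of the pattern (the names <code>PRESENT</code> and <code>ids</code> are purely
 * illustrative, not the actual field names):
 * <pre>
 * private static final Object PRESENT = new Object();
 * private final Hashtable&lt;String, Object&gt; ids = new Hashtable&lt;String, Object&gt;();
 *
 * public void add(String id) { ids.put(id, PRESENT); }
 * public boolean contains(String id) { return ids.containsKey(id); }
 * public void remove(String id) { ids.remove(id); }
 * </pre>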
 * <h4>Session expiration map</h4>
 * In order to scavenge expired sessions, we need a way to know whether they are expired. This information
 * is normally held in the session itself via the <code>lastAccessedTime</code> property.
 * However, we would need to iterate over all sessions to check whether each one is expired, and this migrates
 * all sessions to the node, causing a lot of unneeded traffic between nodes and the Terracotta server.
 * To avoid this, we keep a separate map from session id to expiration time, so that we only need to migrate
 * the expiration times to see whether a session is expired or not.
 * <h4>Update of lastAccessedTime</h4>
 * As a performance improvement, the lastAccessedTime is updated only periodically, and not every time
 * a request enters a node. This optimization allows applications that have frequent requests but less
 * frequent accesses to the session to perform better, because the traffic between the node and the
 * Terracotta server is reduced. The update period is the scavenger period, see {@link Session#access(long)}.
 * <h4>Terracotta lock id</h4>
 * The Terracotta lock id is based on the session id, but this alone is not sufficient, as there may be
 * two sessions with the same id for two different contexts. So we need session id and context path.
 * However, this also is not enough, as we may have the rare case of the same webapp mapped to two different
 * virtual hosts, and each virtual host must have a different session object.
 * Therefore the lock id we need to use is a combination of session id, context path and virtual host, see
 * {@link #newLockId(String)}.
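 * For example, with session id <code>abc123</code>, context path <code>/shop</code> (canonicalized to
 * <code>_shop</code>) and virtual host <code>www.example.com</code>, the resulting lock id would be
 * <code>abc123:_shop:www.example.com</code> (the values shown here are purely illustrative).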
 *
 * @see TerracottaSessionIdManager
 */
public class TerracottaSessionManager extends AbstractSessionManager implements Runnable
{
    /**
     * The local cache of session objects.
     */
    private Map<String, Session> _sessions;
    /**
     * The distributed shared SessionData map.
     * Putting objects into the map results in the objects being sent to Terracotta, and any change
     * to the objects is also replicated, recursively.
     * Getting objects from the map results in the objects being fetched from Terracotta.
     */
    private Hashtable<String, SessionData> _sessionDatas;
    /**
     * The distributed shared session expirations map, needed for scavenging.
     * In particular it supports removal of sessions that have been orphaned by nodeA
     * (for example because it crashed) by virtue of scavenging performed by nodeB.
     */
    private Hashtable<String, MutableLong> _sessionExpirations;
    private String _contextPath;
    private String _virtualHost;
    private long _scavengePeriodMs = 30000;
    private ScheduledExecutorService _scheduler;
    private ScheduledFuture<?> _scavenger;

    public void doStart() throws Exception
    {
        super.doStart();

        _contextPath = canonicalize(_context.getContextPath());
        _virtualHost = virtualHostFrom(_context);

        _sessions = Collections.synchronizedMap(new HashMap<String, Session>());
        _sessionDatas = newSharedMap("sessionData:" + _contextPath + ":" + _virtualHost);
        _sessionExpirations = newSharedMap("sessionExpirations:" + _contextPath + ":" + _virtualHost);
        _scheduler = Executors.newSingleThreadScheduledExecutor();
        scheduleScavenging();
    }

    private Hashtable newSharedMap(String name)
    {
        // We want to partition the session data among contexts, so we need to have different roots for
        // different contexts, and each root must have a different name, since roots with the same name are shared.
        Lock.lock(name);
        try
        {
            // We need a synchronized data structure to have node-local synchronization.
            // We use Hashtable because it is a natively synchronized collection that behaves
            // better in Terracotta than synchronized wrappers obtained with Collections.synchronized*().
            Hashtable result = (Hashtable)ManagerUtil.lookupOrCreateRootNoDepth(name, new Hashtable());
            ((Manageable)result).__tc_managed().disableAutoLocking();
            return result;
        }
        finally
        {
            Lock.unlock(name);
        }
    }

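    /**
     * (Re)schedules the periodic scavenger task with the current scavenge period,
     * cancelling any previously scheduled task.
     */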
    private void scheduleScavenging()
    {
        if (_scavenger != null)
        {
            _scavenger.cancel(false);
            _scavenger = null;
        }
        long scavengePeriod = getScavengePeriodMs();
        if (scavengePeriod > 0 && _scheduler != null)
            _scavenger = _scheduler.scheduleWithFixedDelay(this, scavengePeriod, scavengePeriod, TimeUnit.MILLISECONDS);
    }

    public void doStop() throws Exception
    {
        if (_scavenger != null) _scavenger.cancel(true);
        if (_scheduler != null) _scheduler.shutdownNow();
        super.doStop();
    }

    public void run()
    {
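        // Invoked periodically by the scheduler, see scheduleScavenging()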
        scavenge();
    }

    public void enter(Request request)
    {
        /**
         * SESSION LOCKING
         * This is an entry point for session locking.
         * We arrive here at the beginning of every request.
         */

        String requestedSessionId = request.getRequestedSessionId();
        HttpSession session = request.getSession(false);
        Log.debug("Entering, requested session id {}, session id {}", requestedSessionId, session == null ? null : getClusterId(session));
        if (requestedSessionId == null)
        {
            // The request does not have a session id, do not lock.
            // If the user creates a session later in the request,
            // it will be locked at creation time.
        }
        else
        {
            // We lock anyway with the requested session id.
            // The requested session id may not be a valid one,
            // for example because the session expired.
            // If the user creates a new session, it will have
            // a different session id and that also will be locked.
            enter(getIdManager().getClusterId(requestedSessionId));
        }
    }

    protected void enter(String clusterId)
    {
        Lock.lock(newLockId(clusterId));
        Log.debug("Entered, session id {}", clusterId);
    }

    protected boolean tryEnter(String clusterId)
    {
        return Lock.tryLock(newLockId(clusterId));
    }

    public void exit(Request request)
    {
        /**
         * SESSION LOCKING
         * This is an exit point for session locking.
         * We arrive here at the end of every request.
         */

        String requestedSessionId = request.getRequestedSessionId();
        HttpSession session = request.getSession(false);
        Log.debug("Exiting, requested session id {}, session id {}", requestedSessionId, session == null ? null : getClusterId(session));
        if (requestedSessionId == null)
        {
            if (session == null)
            {
                // No session has been created in the request, just return
            }
            else
            {
                // A new session has been created by the user, unlock it
                exit(getClusterId(session));
            }
        }
        else
        {
            // There was a requested session id, and we locked it, so here release it
            String requestedClusterId = getIdManager().getClusterId(requestedSessionId);
            exit(requestedClusterId);

            if (session != null)
            {
                if (!requestedClusterId.equals(getClusterId(session)))
                {
                    // The requested session id was invalid, and a
                    // new session has been created by the user with
                    // a different session id, unlock it
                    exit(getClusterId(session));
                }
            }
        }
    }

    protected void exit(String clusterId)
    {
        Lock.unlock(newLockId(clusterId));
        Log.debug("Exited, session id {}", clusterId);
    }

    protected void addSession(AbstractSessionManager.Session session)
    {
        /**
         * SESSION LOCKING
         * When this method is called, we already hold the session lock.
         * See {@link #newSession(HttpServletRequest)}
         */
        String clusterId = getClusterId(session);
        Session tcSession = (Session)session;
        SessionData sessionData = tcSession.getSessionData();
        _sessionExpirations.put(clusterId, sessionData._expiration);
        _sessionDatas.put(clusterId, sessionData);
        _sessions.put(clusterId, tcSession);
        Log.debug("Added session {} with id {}", tcSession, clusterId);
    }

    @Override
    public Cookie access(HttpSession session, boolean secure)
    {
        Cookie cookie = super.access(session, secure);
        Log.debug("Accessed session {} with id {}", session, session.getId());
        return cookie;
    }

    @Override
    public void complete(HttpSession session)
    {
        super.complete(session);
        Log.debug("Completed session {} with id {}", session, session.getId());
    }

    protected void removeSession(String clusterId)
    {
        /**
         * SESSION LOCKING
         * When this method is called, we already hold the session lock.
         * Either the scavenger acquired it, or the user invalidated
         * the existing session and thus {@link #enter(String)} was called.
         */

        // Remove locally cached session
        Session session = _sessions.remove(clusterId);
        Log.debug("Removed session {} with id {}", session, clusterId);

        // It may happen that one node removes its expired session data,
        // so that when this node does the same, the session data is already gone
        SessionData sessionData = _sessionDatas.remove(clusterId);
        Log.debug("Removed session data {} with id {}", sessionData, clusterId);

        // Remove the expiration entry used in scavenging
        _sessionExpirations.remove(clusterId);
    }

    public void setScavengePeriodMs(long ms)
    {
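        // Clamp the period between 1 and 60 seconds; a value of 0 selects the 60 seconds maximum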
        ms = ms == 0 ? 60000 : ms;
        ms = ms > 60000 ? 60000 : ms;
        ms = ms < 1000 ? 1000 : ms;
        this._scavengePeriodMs = ms;
        scheduleScavenging();
    }

    public long getScavengePeriodMs()
    {
        return _scavengePeriodMs;
    }

    public AbstractSessionManager.Session getSession(String clusterId)
    {
        Session result = null;

        /**
         * SESSION LOCKING
         * This is an entry point for session locking.
         * We look up the session given the id, and if it exists we hold the lock.
         * We unlock on end of method, since this method can be called outside
         * an {@link #enter(String)}/{@link #exit(String)} pair.
         */
        enter(clusterId);
        try
        {
            // Need to synchronize because we use a get-then-put that must be atomic
            // on the local session cache
            // Refer to method {@link #scavenge()} for an explanation of synchronization order:
            // first on _sessions, then on _sessionExpirations.
            synchronized (_sessions)
            {
                result = _sessions.get(clusterId);
                if (result == null)
                {
                    Log.debug("Session with id {} --> local cache miss", clusterId);

                    // Lookup the distributed shared sessionData object.
                    // This will migrate the session data to this node from the Terracotta server
                    // We have not grabbed the distributed lock associated with this session yet,
                    // so another node can migrate the session data as well. This is no problem,
                    // since just after this method returns the distributed lock will be grabbed by
                    // one node, the session data will be changed and the lock released.
                    // The second node contending for the distributed lock will then acquire it,
                    // and the session data information will be migrated lazily by Terracotta means.
                    // We are only interested in having a SessionData reference locally.
                    Log.debug("Distributed session data with id {} --> lookup", clusterId);
                    SessionData sessionData = _sessionDatas.get(clusterId);
                    if (sessionData == null)
                    {
                        Log.debug("Distributed session data with id {} --> not found", clusterId);
                    }
                    else
                    {
                        Log.debug("Distributed session data with id {} --> found", clusterId);
                        // Wrap the migrated session data and cache the Session object
                        result = new Session(sessionData);
                        _sessions.put(clusterId, result);
                    }
                }
                else
                {
                    Log.debug("Session with id {} --> local cache hit", clusterId);
                    if (!_sessionExpirations.containsKey(clusterId))
                    {
                        // A session is present in the local cache, but it has been expired
                        // or invalidated on another node, perform local clean up.
                        _sessions.remove(clusterId);
                        result = null;
379 Log.debug("Session with id {} --> local cache stale");
                    }
                }
            }
        }
        finally
        {
            /**
             * SESSION LOCKING
             */
            exit(clusterId);
        }
        return result;
    }

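    /**
     * Composes the Terracotta lock id for the given session (cluster) id.
     * The context path and the virtual host are included so that sessions with the same id
     * in different contexts or virtual hosts do not share the same distributed lock.
     */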
    protected String newLockId(String clusterId)
    {
        StringBuilder builder = new StringBuilder(clusterId);
        builder.append(":").append(_contextPath);
        builder.append(":").append(_virtualHost);
        return builder.toString();
    }

    // TODO: This method is not needed, only used for testing
    public Map getSessionMap()
    {
        return Collections.unmodifiableMap(_sessions);
    }

    // TODO: rename to getSessionsCount()
    // TODO: also, this is only used by the superclass for unused statistics data
    public int getSessions()
    {
        return _sessions.size();
    }

    protected Session newSession(HttpServletRequest request)
    {
        /**
         * SESSION LOCKING
         * This is an entry point for session locking.
         * We arrive here when we have to create a new
         * session, for a request.getSession(true) call.
         */
        Session result = new Session(request);

        String requestedSessionId = request.getRequestedSessionId();
        if (requestedSessionId == null)
        {
            // Here the user requested a fresh new session, lock it.
            enter(result.getClusterId());
        }
        else
        {
            if (result.getClusterId().equals(getIdManager().getClusterId(requestedSessionId)))
            {
                // Here we have a cross context dispatch where the same session id
                // is used for two different sessions; we do not lock because the lock
                // has already been acquired in enter(Request), based on the requested
                // session id.
            }
            else
            {
                // Here the requested session id is invalid (the session expired),
                // and a new session is created, lock it.
                enter(result.getClusterId());
            }
        }
        return result;
    }

    protected void invalidateSessions()
    {
        // Do nothing.
        // We don't want to remove and invalidate all the sessions,
        // because this method is called from doStop(), and just
        // because this context is stopping does not mean that we
        // should remove the session from any other node (remember
        // the session map is shared)
    }

    private void scavenge()
    {
        Thread thread = Thread.currentThread();
        ClassLoader old_loader = thread.getContextClassLoader();
        if (_loader != null) thread.setContextClassLoader(_loader);
        try
        {
            long now = System.currentTimeMillis();
            Log.debug(this + " scavenging at {}, scavenge period {}", now, getScavengePeriodMs());

            // Detect the candidates that may have expired already, checking the estimated expiration time.
            Set<String> candidates = new HashSet<String>();
            String lockId = "scavenge:" + _contextPath + ":" + _virtualHost;
            Lock.lock(lockId);
            try
            {
                /**
                 * Synchronize in order to avoid deadlocks with method {@link #getSession(String)}.
                 * In that method, we first synchronize on _sessions, then we call _sessionExpirations.containsKey(),
                 * which is synchronized because Hashtable is natively synchronized.
                 * Here we must synchronize in the same order to avoid deadlock.
                 */
                synchronized (_sessions)
                {
                    synchronized (_sessionExpirations)
                    {
                        // Do not use iterators that throw ConcurrentModificationException
                        // We do a best effort here, and leave possible imprecisions to the next scavenge
                        Enumeration<String> keys = _sessionExpirations.keys();
                        while (keys.hasMoreElements())
                        {
                            String sessionId = keys.nextElement();
                            MutableLong value = _sessionExpirations.get(sessionId);
                            if (value != null)
                            {
                                long expirationTime = value.value;
                                Log.debug("Estimated expiration time {} for session {}", expirationTime, sessionId);
                                if (expirationTime > 0 && expirationTime < now) candidates.add(sessionId);
                            }
                        }

                        _sessions.keySet().retainAll(Collections.list(_sessionExpirations.keys()));
                    }
                }
            }
            finally
            {
                Lock.unlock(lockId);
            }
            Log.debug("Scavenging detected {} candidate sessions to expire", candidates.size());

            // Now validate that the candidates that do expire are really expired,
            // grabbing the session lock for each candidate
            for (String sessionId : candidates)
            {
                Session candidate = (Session)getSession(sessionId);
                if (candidate == null)
                    continue;

                // Here we grab the lock to avoid anyone else interfering
                boolean entered = tryEnter(sessionId);
                if (entered)
                {
                    try
                    {
                        long maxInactiveTime = candidate.getMaxIdlePeriodMs();
                        // Exclude sessions that never expire
                        if (maxInactiveTime > 0)
                        {
                            // The lastAccessedTime is fetched from Terracotta, so we're sure it is up-to-date.
                            long lastAccessedTime = candidate.getLastAccessedTime();
                            // Since we write the shared lastAccessedTime every scavenge period,
                            // take that into account before considering the session expired
                            long expirationTime = lastAccessedTime + maxInactiveTime + getScavengePeriodMs();
                            if (expirationTime < now)
                            {
                                Log.debug("Scavenging expired session {}, expirationTime {}", candidate.getClusterId(), expirationTime);
                                // Calling timeout() results in calling removeSession(), which will clean up the data structures
                                candidate.timeout();
                            }
                            else
                            {
                                Log.debug("Scavenging skipping candidate session {}, expirationTime {}", candidate.getClusterId(), expirationTime);
                            }
                        }
                    }
                    finally
                    {
                        exit(sessionId);
                    }
                }
            }

            int sessionCount = getSessions();
            if (sessionCount < _minSessions) _minSessions = sessionCount;
            if (sessionCount > _maxSessions) _maxSessions = sessionCount;
        }
        catch (Throwable x)
        {
            // The scavenger thread must never exit, so we catch and log everything here
            if (x instanceof ThreadDeath)
                throw (ThreadDeath)x;
            Log.warn("Problem scavenging sessions", x);
        }
        finally
        {
            thread.setContextClassLoader(old_loader);
        }
    }

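    /**
     * Sanitizes the context path so that it can be safely embedded in Terracotta root names and lock ids.
     */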
    private String canonicalize(String contextPath)
    {
        if (contextPath == null) return "";
        return contextPath.replace('/', '_').replace('.', '_').replace('\\', '_');
    }

    private String virtualHostFrom(ContextHandler.SContext context)
    {
        String result = "0.0.0.0";
        if (context == null) return result;

        String[] vhosts = context.getContextHandler().getVirtualHosts();
        if (vhosts == null || vhosts.length == 0 || vhosts[0] == null) return result;

        return vhosts[0];
    }

    class Session extends AbstractSessionManager.Session
    {
        private static final long serialVersionUID = -2134521374206116367L;

        private final SessionData _sessionData;
        private long _lastUpdate;

        protected Session(HttpServletRequest request)
        {
            super(request);
            _sessionData = new SessionData(getClusterId(), _maxIdleMs);
            _lastAccessed = _sessionData.getCreationTime();
        }

        protected Session(SessionData sd)
        {
            super(sd.getCreationTime(), sd.getId());
            _sessionData = sd;
            _lastAccessed = getLastAccessedTime();
            initValues();
        }

        public SessionData getSessionData()
        {
            return _sessionData;
        }

        @Override
        public long getCookieSetTime()
        {
            return _sessionData.getCookieTime();
        }

        @Override
        protected void cookieSet()
        {
            _sessionData.setCookieTime(getLastAccessedTime());
        }

        @Override
        public void setMaxInactiveInterval(int secs)
        {
            super.setMaxInactiveInterval(secs);
            if (_maxIdleMs > 0L && _maxIdleMs / 10L < _scavengePeriodMs)
            {
                long newScavengeSecs = (secs + 9) / 10;
                setScavengePeriodMs(1000L * newScavengeSecs);
            }

            // Update the estimated expiration time
            if (secs < 0)
            {
                _sessionData._expiration.value = -1L;
            }
            else
            {
                _sessionData._expiration.value = System.currentTimeMillis() + 1000L * secs;
            }
        }

        @Override
        public long getLastAccessedTime()
        {
            if (!isValid()) throw new IllegalStateException();
            return _sessionData.getPreviousAccessTime();
        }

        @Override
        public long getCreationTime() throws IllegalStateException
        {
            if (!isValid()) throw new IllegalStateException();
            return _sessionData.getCreationTime();
        }

        // Overridden for visibility
        @Override
        protected String getClusterId()
        {
            return super.getClusterId();
        }

        protected Map newAttributeMap()
        {
            // It is important to never return a new attribute map here (as other Session implementations do),
            // but always return the shared attributes map, so that a new session created on a different cluster
            // node is immediately filled with the session data from Terracotta.
            return _sessionData.getAttributeMap();
        }

        @Override
        protected void access(long time)
        {
            // The local previous access time is always updated via the super.access() call.
            // If the requests are steady and within the scavenge period, the distributed shared access times
            // are never updated. If only one node gets hits, other nodes reach the expiration time and the
            // scavenging on other nodes will believe the session is expired, since the distributed shared
            // access times have never been updated.
            // Therefore we need to update the distributed shared access times once in a while, no matter what.
            long previousAccessTime = getPreviousAccessTime();
            if (time - previousAccessTime > getScavengePeriodMs())
            {
                Log.debug("Out-of-date update of distributed access times: previous {} - current {}", previousAccessTime, time);
                updateAccessTimes(time);
            }
            else
            {
                if (time - _lastUpdate > getScavengePeriodMs())
                {
                    Log.debug("Periodic update of distributed access times: last update {} - current {}", _lastUpdate, time);
                    updateAccessTimes(time);
                }
                else
                {
                    Log.debug("Skipping update of distributed access times: previous {} - current {}", previousAccessTime, time);
                }
            }
            super.access(time);
        }

        /**
         * Updates the shared distributed access times that need to be updated.
         *
         * @param time the update value
         */
        private void updateAccessTimes(long time)
        {
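            // _accessed has not yet been updated for this request, since this method is
            // invoked from access(long) before the super.access(time) call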
            _sessionData.setPreviousAccessTime(_accessed);
            if (getMaxIdlePeriodMs() > 0) _sessionData.setExpirationTime(time + getMaxIdlePeriodMs());
            _lastUpdate = time;
        }

        // Overridden for visibility
        @Override
        protected void timeout()
        {
            super.timeout();
            Log.debug("Timed out session {} with id {}", this, getClusterId());
        }

        @Override
        public void invalidate()
        {
            super.invalidate();
            Log.debug("Invalidated session {} with id {}", this, getClusterId());
        }

        private long getMaxIdlePeriodMs()
        {
            return _maxIdleMs;
        }

        private long getPreviousAccessTime()
        {
            return super.getLastAccessedTime();
        }
    }

    /**
     * The session data that is distributed to cluster nodes via Terracotta.
     */
    public static class SessionData
    {
        private final String _id;
        private final Map _attributes;
        private final long _creation;
        private final MutableLong _expiration;
        private long _previousAccess;
        private long _cookieTime;

        public SessionData(String sessionId, long maxIdleMs)
        {
            _id = sessionId;
            // Don't need synchronization, as we grab a distributed session id lock
            // when this map is accessed.
            _attributes = new HashMap();
            _creation = System.currentTimeMillis();
            _expiration = new MutableLong();
            _previousAccess = _creation;
            // Set expiration time to negative value if the session never expires
            _expiration.value = maxIdleMs > 0 ? _creation + maxIdleMs : -1L;
        }

        public String getId()
        {
            return _id;
        }

        protected Map getAttributeMap()
        {
            return _attributes;
        }

        public long getCreationTime()
        {
            return _creation;
        }

        public long getExpirationTime()
        {
            return _expiration.value;
        }

        public void setExpirationTime(long time)
        {
            _expiration.value = time;
        }

        public long getCookieTime()
        {
            return _cookieTime;
        }

        public void setCookieTime(long time)
        {
            _cookieTime = time;
        }

        public long getPreviousAccessTime()
        {
            return _previousAccess;
        }

        public void setPreviousAccessTime(long time)
        {
            _previousAccess = time;
        }
    }

    protected static class Lock
    {
        private static final ThreadLocal<Map<String, Integer>> nestings = new ThreadLocal<Map<String, Integer>>()
        {
            @Override
            protected Map<String, Integer> initialValue()
            {
                return new HashMap<String, Integer>();
            }
        };

        private Lock()
        {
        }

        public static void lock(String lockId)
        {
            Integer nestingLevel = nestings.get().get(lockId);
            if (nestingLevel == null) nestingLevel = 0;
            if (nestingLevel < 0)
                throw new AssertionError("Lock(" + lockId + ") nest level = " + nestingLevel + ", thread " + Thread.currentThread() + ": " + getLocks());
            if (nestingLevel == 0)
            {
                ManagerUtil.beginLock(lockId, Manager.LOCK_TYPE_WRITE);
                Log.debug("Lock({}) acquired by thread {}", lockId, Thread.currentThread().getName());
            }
            nestings.get().put(lockId, nestingLevel + 1);
            Log.debug("Lock({}) nestings {}", lockId, getLocks());
        }

        public static boolean tryLock(String lockId)
        {
            boolean result = ManagerUtil.tryBeginLock(lockId, Manager.LOCK_TYPE_WRITE);
            Log.debug("Lock({}) tried and" + (result ? "" : " not") + " acquired by thread {}", lockId, Thread.currentThread().getName());
            if (result)
            {
                Integer nestingLevel = nestings.get().get(lockId);
                if (nestingLevel == null) nestingLevel = 0;
                nestings.get().put(lockId, nestingLevel + 1);
                Log.debug("Lock({}) nestings {}", lockId, getLocks());
            }
            return result;
        }

        public static void unlock(String lockId)
        {
            Integer nestingLevel = nestings.get().get(lockId);
            if (nestingLevel == null) return;
            if (nestingLevel < 1)
                throw new AssertionError("Lock(" + lockId + ") nest level = " + nestingLevel + ", thread " + Thread.currentThread() + ": " + getLocks());
            if (nestingLevel == 1)
            {
                ManagerUtil.commitLock(lockId);
                Log.debug("Lock({}) released by thread {}", lockId, Thread.currentThread().getName());
                nestings.get().remove(lockId);
            }
            else
            {
                nestings.get().put(lockId, nestingLevel - 1);
            }
            Log.debug("Lock({}) nestings {}", lockId, getLocks());
        }

        /**
         * For testing and debugging purposes only.
         * @return the lock ids held by the current thread
         */
        protected static Map<String, Integer> getLocks()
        {
            return Collections.unmodifiableMap(nestings.get());
        }
    }

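    /**
     * A mutable holder for the session expiration time.
     * The same instance is referenced both by {@link SessionData} and by the shared expirations map
     * (see addSession), so updating the value field in place is visible through both without
     * requiring further puts.
     */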
    private static class MutableLong
    {
        private long value;
    }
}