| author | klm | 1998-10-22 00:48:50 +0000 |
|---|---|---|
| committer | klm | 1998-10-22 00:48:50 +0000 |
| commit | 25ebeae2168ffa3c811366fe45a54bac254037b7 (patch) | |
| tree | 25a9ddc8257f21f1d0a57d555de5d95e2e6abc88 | |
| parent | cf3105606e565e7cf6313b0eb53732a0ee1b34ba (diff) | |
| download | mailman-25ebeae2168ffa3c811366fe45a54bac254037b7.tar.gz mailman-25ebeae2168ffa3c811366fe45a54bac254037b7.tar.zst mailman-25ebeae2168ffa3c811366fe45a54bac254037b7.zip | |
Redid Pending.py to:
1. Eliminate an unlocked window in which another process could also load
the db, then save after the current process and stomp its changes.
All transactions are now done under the lock.
2. Implement periodic culling of the db, to remove old entries.
Otherwise (I assume) unclaimed entries would just accumulate
forever.
3. Simplify the interface so you can only put in new entries and
retrieve them. Cookie handling is implicit. All external
functionality now lives in two methods of the 'Pending' class.
Details:
1. Atomicity: the lock used to be held only during the write phase. Now
the db load and save handling is not exposed - it is handled inside
the two public methods, which hold the lock around the entire
load/save sequence.
Pending().new(stuff...) places an item's data in the db, returning its cookie.
Pending().confirmed(cookie) returns a tuple for the data, removing the item
from the db. It returns None if the cookie is not registered.
2. Periodically, on saves that occur longer than (module var)
CULL_INTERVAL after the last cull, the db is checked for stale items,
and they're removed. Items are stale if they're older than the value
of the new default config var, PENDING_REQUEST_LIFE. (The last-cull
timestamp on the db is new, but old dbs lacking it will just be culled
on the first save, which adds it.)
3. Two methods provide all the functionality:
Pending().new(stuff...) places an item in the db, returning a cookie
for it.
Pending().confirmed(cookie) returns the item, removing it from the
db - or it returns None if the item is not found.
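
To make the new interface concrete, here is a rough usage sketch. The import path and the surrounding confirmation flow are illustrative assumptions; the (address, password, digest) payload mirrors what the old add2pending() stored, and only new()/confirmed() themselves come from this change.

```python
from Pending import Pending    # import path illustrative; module is Mailman/Pending.py

# A subscription request arrives: stash its data and mail the returned
# cookie to the address for confirmation.  new() accepts any number of
# values and stores them (plus a timestamp) under the cookie.
cookie = Pending().new('user@example.com', 'secret', 0)

# Later - possibly from a different process - the user sends the cookie
# back.  confirmed() returns the stored values (timestamp stripped) and
# removes the entry, or None if the cookie is unknown, already claimed,
# or culled.
data = Pending().confirmed(cookie)
if data is None:
    pass                        # bad, stale, or already-used cookie
else:
    address, password, digest = data
```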
| -rw-r--r-- | Mailman/Pending.py | 146 |
1 file changed, 105 insertions, 41 deletions
diff --git a/Mailman/Pending.py b/Mailman/Pending.py
index 60efd9c4f..267ab6aeb 100644
--- a/Mailman/Pending.py
+++ b/Mailman/Pending.py
@@ -14,59 +14,123 @@
 # along with this program; if not, write to the Free Software
 # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-"""
-Module for handling pending subscriptions
+""" Track pending confirmation of subscriptions.
+
+Pending().new(stuff...) places an item's data in the db, returning its cookie.
+Pending().confirmed(cookie) returns a tuple for the data, removing the item
+from the db.  It returns None if the cookie is not registered.
 """

 import os
-import sys
-import posixfile
 import marshal
 import time
 import whrandom
 import mm_cfg
 import flock

-DB_PATH = os.path.join(mm_cfg.DATA_DIR,"pending_subscriptions.db")
+DB_PATH = os.path.join(mm_cfg.DATA_DIR, "pending_subscriptions.db")
 LOCK_PATH = os.path.join(mm_cfg.LOCK_DIR, "pending_subscriptions.lock")

+PENDING_REQUEST_LIFE = mm_cfg.PENDING_REQUEST_LIFE
+# Something's probably wedged if we hit this.
+DB_LOCK_TIMEOUT = 30
+# Cull stale items from the db on save, after enough time since the last one:
+CULL_INTERVAL = (mm_cfg.PENDING_REQUEST_LIFE / 10)
+
+class Pending:
+    """Db interface for tracking pending confirmations, using random cookies.

-def get_pending():
-    " returns a dict containing pending information"
-    try:
-        fp = open(DB_PATH,"r" )
-    except IOError:
-        return {}
-    dict = marshal.load(fp)
-    return dict
+    .new(stuff...) places an item's data in the db, returning its cookie.
+    .confirmed(cookie) returns a tuple for the data, removing item from db.

-
-def gencookie(p=None):
-    if p is None:
-        p = get_pending()
-    while 1:
-        newcookie = int(whrandom.random() * 1000000)
-        if p.has_key(newcookie) or newcookie < 100000:
-            continue
+    The db is occasionally culled for stale items during saves."""
+
+    # The db is a marshalled dict with two kinds of entries; a bunch of:
+    #   cookie: (content..., timestamp)
+    # and just one:
+    #   LAST_CULL_KEY: next_cull_due_time
+    # Dbs lacking the LAST_CULL_KEY are culled, at which point the cull key
+    # is added.
+    LAST_CULL_KEY = "lastculltime"
+
+    def __init__(self,
+                 db_path = DB_PATH,
+                 lock_path = LOCK_PATH,
+                 item_life = PENDING_REQUEST_LIFE,
+                 cull_interval = CULL_INTERVAL,
+                 db_lock_timeout = DB_LOCK_TIMEOUT):
+        self.item_life = item_life
+        self.db_path = db_path
+        self.__lock = flock.FileLock(lock_path)
+        self.cull_interval = cull_interval
+        self.db_lock_timeout = db_lock_timeout
+
+    def new(self, *content):
+        """Create a new entry in the pending db, returning cookie for it."""
+        now = int(time.time())
+        db = self.__load()
+        # Generate cookie between 1e5 and 1e6 and not already in the db.
+        while 1:
+            newcookie = int(whrandom.random() * 1e6)
+            if newcookie >= 1e5 and not db.has_key(newcookie):
+                break
+        db[newcookie] = content + (now,)        # Tack on timestamp.
+        self.__save(db)
         return newcookie

+    def confirmed(self, cookie):
+        "Return entry for cookie, removing it from db, or None if not found."
+        content = None
+        got = None
+        db = self.__load()
+        try:
+            if db.has_key(cookie):
+                content = db[cookie][0:-1]      # Strip off timestamp.
+                got = 1
+                del db[cookie]
+        finally:
+            if got:
+                self.__save(db)
+            else:
+                self.__release_lock()
+        return content
+
+    def __load(self):
+        "Return db as dict, returning an empty one if db not yet existant."
+        self.__assert_lock(self.db_lock_timeout)
+        try:
+            fp = open(self.db_path,"r" )
+            return marshal.load(fp)
+        except IOError:
+            # Not yet existing.  Initialize a fresh one:
+            return {self.LAST_CULL_KEY: int(time.time())}
+
+    def __save(self, db):
+        """Marshal dict db to file - the exception is propagated on failure.
+
+        Cull stale items from the db, if that hasn't been done in a while."""
+        if not self.__lock.locked():
+            raise flock.NotLockedError
+        # Cull if its been a while (or if cull key is missing, ie, old
+        # version - which will be reformed to new format by cull).
+        if (db.get(self.LAST_CULL_KEY, 0)
+            < int(time.time()) - self.cull_interval):
+            self.__cull_db(db)
+        fp = open(self.db_path, "w")
+        marshal.dump(db, fp)
+        fp.close()
+        self.__release_lock()
+
+    def __assert_lock(self, timeout):
+        """Get the lock if not already acquired, or happily just keep it.

-def set_pending(p):
-    lock_file = flock.FileLock(LOCK_PATH)
-    lock_file.lock()
-    try:
-        fp = open(DB_PATH, "w")
-        marshal.dump(p, fp)
-        fp.close()
-    finally:
-        # be sure the lock file is released
-        lock_file.unlock()
-
-
-def add2pending(email_addr, password, digest, cookie):
-    ts = int(time.time())
-    processed = 0
-    p = get_pending()
-    p[cookie] = (email_addr, password, digest, ts)
-    set_pending(p)
-
-
+        Raises TimeOutError if unable to get lock within timeout."""
+        try:
+            self.__lock.lock(timeout)
+        except flock.AlreadyCalledLockError:
+            pass
+
+    def __release_lock(self):
+        self.__lock.unlock()
+
+    def __cull_db(self, db):
+        """Remove old items from db and revise last-culled timestamp."""
+        now = int(time.time())
+        too_old = now - self.item_life
+        cullkey = self.LAST_CULL_KEY
+        for k, v in db.items():
+            if k == cullkey:
+                continue
+            if v[-1] < too_old:
+                del db[k]
+        # Register time after which a new cull is due:
+        db[self.LAST_CULL_KEY] = now
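
For callers of the removed module-level functions, the mapping onto the new class is roughly as below. The old-style lines in the comment are a reconstruction of typical use; actual call sites are not part of this diff, and the literal values are placeholders.

```python
from Pending import Pending    # import path illustrative, as in the sketch above

email_addr, password, digest = 'user@example.com', 'secret', 0

# Old style (functions removed by this commit): the caller generated the
# cookie and touched the db directly, and the lock was held only for the write:
#
#     cookie = gencookie()
#     add2pending(email_addr, password, digest, cookie)
#
# New style: cookie generation, locking, load/save, and culling are all
# internal to the two Pending() methods.
cookie = Pending().new(email_addr, password, digest)
entry = Pending().confirmed(cookie)    # None if the cookie is not registered
```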
