author     bwarsaw   2007-04-09 22:16:21 +0000
committer  bwarsaw   2007-04-09 22:16:21 +0000
commit     7c7465b4040861e73d24650eb6dadaf625a0d3d8 (patch)
tree       f7e86474ac7f3f3a48e35f8e9d42ac86f2f5c170
parent     081e590fab0f512f7ddb4a7e5c3fdc60ca154cff (diff)
Improve the way we avoid calling InitTempVars() multiple times on an
instantiated MailList object via the mapper extension's populate_instance()
method. This is based on information from the SQLAlchemy folks.

Add more useful output for LockFile debugging.

Add checks in loginit.py's emit() method (and .flush()) so that if the stream
has been closed, log messages will go to stderr. This happens under the test
suite with SQLAlchemy, because SA keeps references to the MailList objects
which it seems we cannot clear before Python exits. So if, say, the lock
logger is at debug level, then when the lock object gets cleared at Python
shutdown, the stream will already have been closed by the time
LockFile.__del__() gets called. This change avoids the traceback at the
expense of a little extra stderr output.

Renames:
    MailList.__lock      -> MailList._lock
    MailList.__timestamp -> MailList._timestamp
-rw-r--r--  Mailman/LockFile.py            19
-rw-r--r--  Mailman/MailList.py            38
-rw-r--r--  Mailman/database/dbcontext.py   1
-rw-r--r--  Mailman/database/listdata.py   16
-rw-r--r--  Mailman/loginit.py             12
5 files changed, 55 insertions, 31 deletions
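
The first change is easiest to see in isolation. The sketch below is
illustrative code, not the Mailman source: the Record class, its attribute
values, and the example address are made up, and only the hasattr() guard
mirrors what the patch adds to InitTempVars(). The point is that when the ORM
re-populates an already-initialized object and re-runs the transient
initializer, the existing lock object must not be clobbered.

    class Record(object):
        """Stand-in for MailList; only the guard logic is taken from the patch."""

        def init_temp_vars(self, name):
            # The lock attribute doubles as the "already initialized" marker,
            # mirroring the hasattr(self, '_lock') check added below.
            if hasattr(self, '_lock'):
                return
            self._timestamp = 0
            self._lock = 'lock for %s' % (name or '<site>')

    r = Record()
    r.init_temp_vars(None)                 # first call creates the site lock
    r._lock = 'per-list lock'              # simulate replacement by Create()
    r.init_temp_vars('test@example.com')   # re-population is now a no-op
    assert r._lock == 'per-list lock'
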
diff --git a/Mailman/LockFile.py b/Mailman/LockFile.py
index 20f6349ef..b240ce5a4 100644
--- a/Mailman/LockFile.py
+++ b/Mailman/LockFile.py
@@ -204,7 +204,7 @@ class LockFile:
# very rare occurence, only happens from cron, and (only?) on Solaris
# 2.6.
self._touch()
- log.debug('laying claim')
+ log.debug('laying claim: %s', self._lockfile)
# for quieting the logging output
loopcount = -1
while True:
@@ -215,7 +215,7 @@ class LockFile:
# If we got here, we know we know we got the lock, and never
# had it before, so we're done. Just touch it again for the
# fun of it.
- log.debug('got the lock')
+ log.debug('got the lock: %s', self._lockfile)
self._touch()
break
except OSError, e:
@@ -243,7 +243,7 @@ class LockFile:
log.error('unexpected linkcount: %d', self._linkcount())
elif self._read() == self._tmpfname:
# It was us that already had the link.
- log.debug('already locked')
+ log.debug('already locked: %s', self._lockfile)
raise AlreadyLockedError
# otherwise, someone else has the lock
pass
@@ -263,7 +263,7 @@ class LockFile:
# and the expected lock lifetime hasn't expired yet. So let's
# wait a while for the owner of the lock to give it up.
elif not loopcount % 100:
- log.debug('waiting for claim')
+ log.debug('waiting for claim: %s', self._lockfile)
self._sleep()
def unlock(self, unconditionally=False):
@@ -289,7 +289,7 @@ class LockFile:
except OSError, e:
if e.errno <> errno.ENOENT:
raise
- log.debug('unlocked')
+ log.debug('unlocked: %s', self._lockfile)
def locked(self):
"""Return true if we own the lock, false if we do not.
@@ -313,12 +313,11 @@ class LockFile:
return self._read() == self._tmpfname
def finalize(self):
- log.debug('finalize')
+ log.debug('finalize: %s', self._lockfile)
self.unlock(unconditionally=True)
- # XXX Can we just get rid of __del__()?
def __del__(self):
- log.debug('__del__')
+ log.debug('__del__: %s', self._lockfile)
if self._owned:
self.finalize()
@@ -351,7 +350,7 @@ class LockFile:
# And do some sanity checks
assert self._linkcount() == 2
assert self.locked()
- log.debug('transferred the lock')
+ log.debug('transferred the lock: %s', self._lockfile)
def _take_possession(self):
self._tmpfname = tmpfname = '%s.%s.%d' % (
@@ -360,7 +359,7 @@ class LockFile:
# the transfer.
while self._linkcount() <> 2 or self._read() <> tmpfname:
time.sleep(0.25)
- log.debug('took possession of the lock')
+ log.debug('took possession of the lock: %s', self._lockfile)
def _disown(self):
self._owned = False
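
The LockFile changes above do one thing: each debug message now carries the
lock file's path, so that when several locks log at the same level their
lines can be told apart. For orientation, here is a hedged usage sketch based
only on the API visible in this diff (a constructor taking a lifetime keyword,
lock() with a timeout, locked(), and unlock(unconditionally=True)); the path
and the numbers are made up, and the import assumes a Mailman checkout on
sys.path.

    from Mailman import LockFile

    # Illustrative values only.
    lock = LockFile.LockFile('/var/lock/mailman/example.lock', lifetime=300)
    lock.lock(15)                # with this patch the debug lines name the file
    try:
        assert lock.locked()
        # ... do work that requires the lock ...
    finally:
        lock.unlock(unconditionally=True)
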
diff --git a/Mailman/MailList.py b/Mailman/MailList.py
index e2979b692..cb310dd74 100644
--- a/Mailman/MailList.py
+++ b/Mailman/MailList.py
@@ -180,9 +180,16 @@ class MailList(object, HTMLFormatter, Deliverer, ListAdmin,
#
# Lock management
#
+ def _make_lock(self, name, lock=False):
+ self._lock = LockFile.LockFile(
+ os.path.join(config.LOCK_DIR, name) + '.lock',
+ lifetime=config.LIST_LOCK_LIFETIME)
+ if lock:
+ self._lock.lock()
+
def Lock(self, timeout=0):
database.lock(self)
- self.__lock.lock(timeout)
+ self._lock.lock(timeout)
self._memberadaptor.lock()
# Must reload our database for consistency. Watch out for lists that
# don't exist.
@@ -194,11 +201,11 @@ class MailList(object, HTMLFormatter, Deliverer, ListAdmin,
def Unlock(self):
database.unlock(self)
- self.__lock.unlock(unconditionally=True)
+ self._lock.unlock(unconditionally=True)
self._memberadaptor.unlock()
def Locked(self):
- return self.__lock.locked()
+ return self._lock.locked()
@@ -303,6 +310,14 @@ class MailList(object, HTMLFormatter, Deliverer, ListAdmin,
#
def InitTempVars(self, name, check_version=True):
"""Set transient variables of this and inherited classes."""
+ # Because of the semantics of the database layer, it's possible that
+ # this method gets called more than once on an existing object. For
+ # example, if the MailList object is expunged from the current db
+ # session, then this may get called again when the object's persistent
+ # attributes are re-read from the database. This can have nasty
+ # consequences, so ensure that we're only called once.
+ if hasattr(self, '_lock'):
+ return
# Attach a membership adaptor instance.
parts = config.MEMBER_ADAPTOR_CLASS.split(DOT)
adaptor_class = parts.pop()
@@ -313,10 +328,8 @@ class MailList(object, HTMLFormatter, Deliverer, ListAdmin,
# The timestamp is set whenever we load the state from disk. If our
# timestamp is newer than the modtime of the config.pck file, we don't
# need to reload, otherwise... we do.
- self.__timestamp = 0
- self.__lock = LockFile.LockFile(
- os.path.join(config.LOCK_DIR, name or '<site>') + '.lock',
- lifetime=config.LIST_LOCK_LIFETIME)
+ self._timestamp = 0
+ self._make_lock(name or '<site>')
# XXX FIXME Sometimes name is fully qualified, sometimes it's not.
if name:
if '@' in name:
@@ -549,8 +562,11 @@ class MailList(object, HTMLFormatter, Deliverer, ListAdmin,
self._internal_name = self.list_name = listname
self._full_path = os.path.join(config.LIST_DATA_DIR, fqdn_listname)
Utils.makedirs(self._full_path)
- # Don't use Lock() since that tries to load the non-existant config.pck
- self.__lock.lock()
+ # Don't use Lock() since that tries to load the non-existant
+ # config.pck. However, it's likely that the current lock is a site
+ # lock because the MailList was instantiated with no name. Blow away
+ # the current lock object and re-instantiate it.
+ self._make_lock(fqdn_listname, lock=True)
# We need to set this attribute before calling InitVars() because
# Archiver.InitVars() depends on this to calculate and create the
# archive directory. Without this, Archiver will create a bogus
@@ -573,7 +589,7 @@ class MailList(object, HTMLFormatter, Deliverer, ListAdmin,
# interested in it. This will raise a NotLockedError if we don't have
# the lock (which is a serious problem!). TBD: do we need to be more
# defensive?
- self.__lock.refresh()
+ self._lock.refresh()
# Commit the database transaction
database.save(self)
# The member adaptor may have its own save operation
@@ -607,7 +623,7 @@ class MailList(object, HTMLFormatter, Deliverer, ListAdmin,
self.InitVars()
# Then reload the database (but don't recurse). Force a reload even
# if we have the most up-to-date state.
- self.__timestamp = 0
+ self._timestamp = 0
self.Load(self.fqdn_listname, check_version=False)
# We must hold the list lock in order to update the schema
waslocked = self.Locked()
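
The MailList hunks also carry the __lock/__timestamp renames listed in the
commit message. One plausible motivation, an assumption on my part since the
message only records the rename, is Python's name mangling: a double-underscore
attribute is stored as _MailList__lock, so string-based lookups such as the new
hasattr(self, '_lock') guard, as well as mixins and debugging tools, cannot
reach it under the obvious name. A minimal illustration (generic class, not
Mailman code):

    class A(object):
        def __init__(self):
            self.__hidden = 1    # stored on the instance as _A__hidden
            self._plain = 2

    a = A()
    assert not hasattr(a, '__hidden')   # string lookups are not mangled
    assert hasattr(a, '_A__hidden')
    assert hasattr(a, '_plain')
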
diff --git a/Mailman/database/dbcontext.py b/Mailman/database/dbcontext.py
index 3482122c9..5421fb6f2 100644
--- a/Mailman/database/dbcontext.py
+++ b/Mailman/database/dbcontext.py
@@ -98,6 +98,7 @@ class DBContext(object):
def close(self):
self.session.close()
+ self.session = None
def _touch(self, url):
parts = urlparse(url)
diff --git a/Mailman/database/listdata.py b/Mailman/database/listdata.py
index 05ea967ff..9f537b229 100644
--- a/Mailman/database/listdata.py
+++ b/Mailman/database/listdata.py
@@ -173,13 +173,15 @@ def make_table(metadata, tables):
class MailListMapperExtension(MapperExtension):
def populate_instance(self, mapper, context, row, mlist, ikey, isnew):
- # isnew doesn't really seem to give us what we want. Specifically, if
- # we expire the MailList object from the session, we'll get called
- # with populate_instance(..., isnew=True) when the object is reloaded
- # from the database. We'll still check isnew, but we'll also check to
- # make sure there's no _memberadaptor attribute, since that is set in
- # the InitTempVars() method.
- if isnew and not hasattr(mlist, '_memberadaptor'):
+ # Michael Bayer on the sqlalchemy mailing list:
+ #
+ # "isnew" is used to indicate that we are going to populate the
+ # instance with data from the database, *and* that this particular row
+ # is the first row in the result which has indicated the presence of
+ # this entity (i.e. the primary key points to it). this implies that
+ # populate_instance() can be called *multiple times* for the instance,
+ # if multiple successive rows all contain its particular primary key.
+ if isnew:
# Get the list name and host name -- which are required by
# InitTempVars() from the row data.
list_name = row['listdata_list_name']
diff --git a/Mailman/loginit.py b/Mailman/loginit.py
index 71a5e9722..f2646160e 100644
--- a/Mailman/loginit.py
+++ b/Mailman/loginit.py
@@ -80,16 +80,21 @@ class ReopenableFileHandler(logging.Handler):
return codecs.open(self._filename, 'a', 'utf-8')
def flush(self):
- self._stream.flush()
+ if self._stream:
+ self._stream.flush()
def emit(self, record):
+ # It's possible for the stream to have been closed by the time we get
+ # here, due to the shut down semantics. This mostly happens in the
+ # test suite, but be defensive anyway.
+ stream = self._stream or sys.stderr
try:
msg = self.format(record)
fs = '%s\n'
try:
- self._stream.write(fs % msg)
+ stream.write(fs % msg)
except UnicodeError:
- self._stream.write(fs % msg.encode('string-escape'))
+ stream.write(fs % msg.encode('string-escape'))
self.flush()
except:
self.handleError(record)
@@ -97,6 +102,7 @@ class ReopenableFileHandler(logging.Handler):
def close(self):
self.flush()
self._stream.close()
+ self._stream = None
logging.Handler.close(self)
def reopen(self):
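
The loginit.py hunks make the handler tolerate being used after its stream has
gone away: flush() and emit() fall back gracefully (emit() writes to stderr),
and close() records the closure by setting _stream to None. The following
self-contained sketch shows the same pattern outside Mailman;
FallbackFileHandler, the logger name, and the /tmp path are made up for
illustration and are not part of the patch.

    import logging
    import sys

    class FallbackFileHandler(logging.Handler):
        def __init__(self, filename):
            logging.Handler.__init__(self)
            self._stream = open(filename, 'a')

        def flush(self):
            if self._stream:
                self._stream.flush()

        def emit(self, record):
            # If close() has already run, _stream is None; fall back to stderr
            # instead of raising during interpreter shutdown.
            stream = self._stream or sys.stderr
            try:
                stream.write('%s\n' % self.format(record))
                self.flush()
            except Exception:
                self.handleError(record)

        def close(self):
            self.flush()
            if self._stream:
                self._stream.close()
                self._stream = None
            logging.Handler.close(self)

    log = logging.getLogger('demo')
    log.propagate = False                # keep the demo's output self-contained
    log.addHandler(FallbackFileHandler('/tmp/demo.log'))
    log.error('this line goes to the file')
    log.handlers[0].close()
    log.error('the stream is closed; this line falls back to stderr')
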