| field | value | date |
|---|---|---|
| author | Barry Warsaw | 2007-10-31 17:38:51 -0400 |
| committer | Barry Warsaw | 2007-10-31 17:38:51 -0400 |
| commit | f321d85d91a370294e771dbaa22493008d78dfdd (patch) | |
| tree | 8cf4c3e7cab70ccc9059f147ff1bf4b3bf150115 | |
| parent | 1ad73a52bb9d82ef3af1e34ad9ef66ac2eda2909 (diff) | |
| download | mailman-f321d85d91a370294e771dbaa22493008d78dfdd.tar.gz mailman-f321d85d91a370294e771dbaa22493008d78dfdd.tar.zst mailman-f321d85d91a370294e771dbaa22493008d78dfdd.zip | |
39 files changed, 431 insertions, 1022 deletions
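
Nearly every hunk below makes the same substitution: the bundled `Mailman.lockfile` module goes away (it is deleted outright at the end of this diff) and call sites use `locknix.lockfile.Lock` instead, usually as a context manager. A hedged sketch of the converted call-site shape, using only the API the diff itself exercises; the lock directory and function name are illustrative stand-ins for `config.LOCK_DIR` and the Postfix alias writer:

```python
from __future__ import with_statement   # needed for 'with' on Python 2.5
import os

from locknix.lockfile import Lock

LOCK_DIR = '/var/lock/mailman'           # illustrative stand-in for config.LOCK_DIR

def update_alias_database(write_aliases):
    # Same shape as the MTA/Postfix.py hunk below: one global lock file
    # serializes every process that rewrites the alias database.
    with Lock(os.path.join(LOCK_DIR, 'creator')):
        write_aliases()
```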
diff --git a/Mailman/Archiver/HyperArch.py b/Mailman/Archiver/HyperArch.py index 7ce0905ca..97d608993 100644 --- a/Mailman/Archiver/HyperArch.py +++ b/Mailman/Archiver/HyperArch.py @@ -41,12 +41,12 @@ import binascii from email.Charset import Charset from email.Errors import HeaderParseError from email.Header import decode_header, make_header +from locknix.lockfile import Lock from Mailman import Errors from Mailman import MailList from Mailman import Utils from Mailman import i18n -from Mailman import lockfile from Mailman.Archiver import HyperDatabase from Mailman.Archiver import pipermail from Mailman.Mailbox import ArchiverMailbox @@ -604,9 +604,6 @@ class HyperArchive(pipermail.T): def __init__(self, maillist): # can't init the database while other processes are writing to it! - # XXX TODO- implement native locking - # with mailman's LockFile module for HyperDatabase.HyperDatabase - # dir = maillist.archive_dir() db = HyperDatabase.HyperDatabase(dir, maillist) self.__super_init(dir, reload=1, database=db) @@ -786,7 +783,7 @@ class HyperArchive(pipermail.T): def GetArchLock(self): if self._lock_file: return 1 - self._lock_file = lockfile.LockFile( + self._lock_file = Lock( os.path.join(config.LOCK_DIR, self.maillist.fqdn_listname + '-arch.lock')) try: diff --git a/Mailman/Archiver/HyperDatabase.py b/Mailman/Archiver/HyperDatabase.py index 3644fbc58..b1d0ec25b 100644 --- a/Mailman/Archiver/HyperDatabase.py +++ b/Mailman/Archiver/HyperDatabase.py @@ -27,7 +27,7 @@ import errno # package/project modules # import pipermail -from Mailman.lockfile import LockFile +from locknix import lockfile CACHESIZE = pipermail.CACHESIZE @@ -58,7 +58,7 @@ class DumbBTree: def __init__(self, path): self.current_index = 0 self.path = path - self.lockfile = LockFile(self.path + ".lock") + self.lockfile = lockfile.Lock(self.path + ".lock") self.lock() self.__dirty = 0 self.dict = {} @@ -80,7 +80,7 @@ class DumbBTree: def unlock(self): try: self.lockfile.unlock() - except LockFile.NotLockedError: + except lockfile.NotLockedError: pass def __delitem__(self, item): diff --git a/Mailman/Handlers/Scrubber.py b/Mailman/Handlers/Scrubber.py index 3f29fc02b..bb607b1d2 100644 --- a/Mailman/Handlers/Scrubber.py +++ b/Mailman/Handlers/Scrubber.py @@ -29,12 +29,12 @@ import binascii import tempfile from cStringIO import StringIO -from mimetypes import guess_all_extensions - from email.charset import Charset from email.generator import Generator from email.parser import HeaderParser from email.utils import make_msgid, parsedate +from locknix.lockfile import Lock +from mimetypes import guess_all_extensions from Mailman import Message from Mailman import Utils @@ -42,7 +42,6 @@ from Mailman.Errors import DiscardMessage from Mailman.app.archiving import get_base_archive_url from Mailman.configuration import config from Mailman.i18n import _ -from Mailman.lockfile import LockFile # Path characters for common platforms pre = re.compile(r'[/\\:]') @@ -424,7 +423,7 @@ def save_attachment(mlist, msg, dir, filter_html=True): ext = '.bin' path = None # We need a lock to calculate the next attachment number - with LockFile(os.path.join(fsdir, 'attachments.lock')): + with Lock(os.path.join(fsdir, 'attachments.lock')): # Now base the filename on what's in the attachment, uniquifying it if # necessary. 
if not filename or config.SCRUBBER_DONT_USE_ATTACHMENT_FILENAME: diff --git a/Mailman/MTA/Postfix.py b/Mailman/MTA/Postfix.py index 1712bb638..268d2f325 100644 --- a/Mailman/MTA/Postfix.py +++ b/Mailman/MTA/Postfix.py @@ -26,13 +26,13 @@ import time import errno import logging +from locknix.lockfile import Lock from stat import * from Mailman import Utils from Mailman.MTA.Utils import makealiases from Mailman.configuration import config from Mailman.i18n import _ -from Mailman.lockfile import LockFile LOCKFILE = os.path.join(config.LOCK_DIR, 'creator') ALIASFILE = os.path.join(config.DATA_DIR, 'aliases') @@ -338,7 +338,7 @@ def _do_remove(mlist, textfile): def remove(mlist, cgi=False): # Acquire the global list database lock - with LockFile(LOCKFILE): + with Lock(LOCKFILE): if config.USE_LMTP: _do_remove(mlist, TRPTFILE) else: diff --git a/Mailman/bin/arch.py b/Mailman/bin/arch.py index 09ca4d914..6227482ad 100644 --- a/Mailman/bin/arch.py +++ b/Mailman/bin/arch.py @@ -23,14 +23,15 @@ import errno import shutil import optparse +from locknix.lockfile import Lock + from Mailman import Errors from Mailman import Version from Mailman import i18n from Mailman.Archiver.HyperArch import HyperArchive from Mailman.Defaults import hours -from Mailman.MailList import MailList from Mailman.configuration import config -from Mailman.lockfile import LockFile +from Mailman.initialize import initialize _ = i18n._ __i18n_templates__ = True @@ -103,62 +104,51 @@ def main(): mbox = args[1] # Open the mailing list object - mlist = None - lock = None - try: + mlist = config.list_manager.get(listname) + if mlist is None: + parser.error(_('No such list: $listname')) + if mbox is None: + mbox = mlist.ArchiveFileName() + + i18n.set_language(mlist.preferred_language) + # Lay claim to the archive's lock file. This is so no other post can + # mess up the archive while we're processing it. Try to pick a + # suitably long period of time for the lock lifetime even though we + # really don't know how long it will take. + # + # XXX processUnixMailbox() should refresh the lock. + lock_path = os.path.join(mlist.full_path, '.archiver.lck') + with Lock(lock_path, lifetime=int(hours(3))): + # Maybe wipe the old archives + if opts.wipe: + if mlist.scrub_nondigest: + # TK: save the attachments dir because they are not in mbox + saved = False + atchdir = os.path.join(mlist.archive_dir(), 'attachments') + savedir = os.path.join(mlist.archive_dir() + '.mbox', + 'attachments') + try: + os.rename(atchdir, savedir) + saved = True + except OSError, e: + if e.errno <> errno.ENOENT: + raise + shutil.rmtree(mlist.archive_dir()) + if mlist.scrub_nondigest and saved: + os.renames(savedir, atchdir) try: - mlist = MailList(listname) - except Errors.MMListError, e: - parser.print_help() - print >> sys.stderr, _('No such list: $listname\n$e') + fp = open(mbox) + except IOError, e: + if e.errno == errno.ENOENT: + print >> sys.stderr, _('Cannot open mbox file: $mbox') + else: + print >> sys.stderr, e sys.exit(1) - if mbox is None: - mbox = mlist.ArchiveFileName() - - i18n.set_language(mlist.preferred_language) - # Lay claim to the archive's lock file. This is so no other post can - # mess up the archive while we're processing it. Try to pick a - # suitably long period of time for the lock lifetime even though we - # really don't know how long it will take. - # - # XXX processUnixMailbox() should refresh the lock. 
- with LockFile(os.path.join(mlist.full_path, '.archiver.lck'), - lifetime=int(hours(3))): - # Maybe wipe the old archives - if opts.wipe: - if mlist.scrub_nondigest: - # TK: save the attachments dir because they are not in mbox - saved = False - atchdir = os.path.join(mlist.archive_dir(), 'attachments') - savedir = os.path.join(mlist.archive_dir() + '.mbox', - 'attachments') - try: - os.rename(atchdir, savedir) - saved = True - except OSError, e: - if e.errno <> errno.ENOENT: - raise - shutil.rmtree(mlist.archive_dir()) - if mlist.scrub_nondigest and saved: - os.renames(savedir, atchdir) - try: - fp = open(mbox) - except IOError, e: - if e.errno == errno.ENOENT: - print >> sys.stderr, _('Cannot open mbox file: $mbox') - else: - print >> sys.stderr, e - sys.exit(1) - - archiver = HyperArchive(mlist) - archiver.VERBOSE = opts.verbose - try: - archiver.processUnixMailbox(fp, opts.start, opts.end) - finally: - archiver.close() - fp.close() - - -if __name__ == '__main__': - main() + archiver = HyperArchive(mlist) + archiver.VERBOSE = opts.verbose + try: + archiver.processUnixMailbox(fp, opts.start, opts.end) + finally: + archiver.close() + fp.close() diff --git a/Mailman/bin/gate_news.py b/Mailman/bin/gate_news.py index 6fc8139c6..9402504dc 100644 --- a/Mailman/bin/gate_news.py +++ b/Mailman/bin/gate_news.py @@ -24,15 +24,15 @@ import socket import logging import nntplib import optparse - import email.Errors + from email.Parser import Parser +from locknix import lockfile from Mailman import MailList from Mailman import Message from Mailman import Utils from Mailman import Version -from Mailman import lockfile from Mailman import loginit from Mailman.configuration import config from Mailman.i18n import _ @@ -233,12 +233,12 @@ def main(): log = logging.getLogger('mailman.fromusenet') try: - with lockfile.LockFile(GATENEWS_LOCK_FILE, - # It's okay to hijack this - lifetime=LOCK_LIFETIME): + with lockfile.Lock(GATENEWS_LOCK_FILE, + # It's okay to hijack this + lifetime=LOCK_LIFETIME): process_lists(lock) clearcache() - except LockFile.TimeOutError: + except lockfile.TimeOutError: log.error('Could not acquire gate_news lock') diff --git a/Mailman/bin/mailmanctl.py b/Mailman/bin/mailmanctl.py index 07716029b..8b7eeb5d2 100644 --- a/Mailman/bin/mailmanctl.py +++ b/Mailman/bin/mailmanctl.py @@ -25,11 +25,12 @@ import socket import logging import optparse +from locknix import lockfile + from Mailman import Defaults from Mailman import Errors from Mailman import Utils from Mailman import Version -from Mailman import lockfile from Mailman import loginit from Mailman.configuration import config from Mailman.i18n import _ @@ -197,7 +198,7 @@ def qrunner_state(): def acquire_lock_1(force): # Be sure we can acquire the master qrunner lock. If not, it means some # other master qrunner daemon is already going. 
- lock = lockfile.LockFile(config.LOCK_FILE, LOCK_LIFETIME) + lock = lockfile.Lock(config.LOCK_FILE, LOCK_LIFETIME) try: lock.lock(0.1) return lock diff --git a/Mailman/bin/testall.py b/Mailman/bin/testall.py index f6f263a31..9be591253 100644 --- a/Mailman/bin/testall.py +++ b/Mailman/bin/testall.py @@ -47,7 +47,7 @@ def v_callback(option, opt, value, parser): elif opt in ('-v', '--verbose'): delta = 1 else: - delta = 0 + raise AssertionError('Unexpected option: %s' % opt) dest = getattr(parser.values, option.dest) setattr(parser.values, option.dest, max(0, dest + delta)) @@ -217,7 +217,8 @@ def main(): with open(cfg_out, 'a') as fp: print >> fp, 'SQLALCHEMY_ENGINE_URL = "%s"' % test_engine_url - initialize_2() + # With -vvv, turn on engine debugging. + initialize_2(opts.verbosity > 3) # Run the tests basedir = os.path.dirname(Mailman.__file__) diff --git a/Mailman/bin/update.py b/Mailman/bin/update.py index 93bb0021b..379a5a1b5 100644 --- a/Mailman/bin/update.py +++ b/Mailman/bin/update.py @@ -26,12 +26,13 @@ import cPickle import marshal import optparse +from locknix.lockfile import TimeOutError + from Mailman import MailList from Mailman import Message from Mailman import Pending from Mailman import Utils from Mailman import Version -from Mailman.LockFile import TimeOutError from Mailman.MemberAdaptor import BYBOUNCE, ENABLED from Mailman.OldStyleMemberships import OldStyleMemberships from Mailman.Queue.Switchboard import Switchboard diff --git a/Mailman/configuration.py b/Mailman/configuration.py index e4d8ac715..eee9c8363 100644 --- a/Mailman/configuration.py +++ b/Mailman/configuration.py @@ -172,12 +172,15 @@ class Configuration(object): code = self.DEFAULT_SERVER_LANGUAGE self.languages.enable_language(code) - def add_domain(self, email_host, url_host): - """Add the definition of a virtual domain. + def add_domain(self, email_host, url_host=None): + """Add a virtual domain. - email_host is the right-hand side of the posting email address, - e.g. 'example.com' in 'mylist@example.com'. url_host is the host name - part of the exposed web pages, e.g. 'www.example.com'.""" + :param email_host: The host name for the email interface. + :param url_host: Optional host name for the web interface. If not + given, the email host will be used. + """ + if url_host is None: + url_host = email_host if email_host in self.domains: raise Errors.BadDomainSpecificationError( 'Duplicate email host: %s' % email_host) diff --git a/Mailman/database/__init__.py b/Mailman/database/__init__.py index e9c338952..acc74642f 100644 --- a/Mailman/database/__init__.py +++ b/Mailman/database/__init__.py @@ -25,10 +25,11 @@ __all__ = [ import os +from locknix.lockfile import Lock from elixir import objectstore from zope.interface import implements -from Mailman.interfaces import IDatabase, IPending +from Mailman.interfaces import IDatabase from Mailman.database.listmanager import ListManager from Mailman.database.usermanager import UserManager from Mailman.database.messagestore import MessageStore @@ -55,14 +56,13 @@ class StockDatabase: self.pendings = None self.requests = None - def initialize(self): + def initialize(self, debug=None): from Mailman.configuration import config from Mailman.database import model - from Mailman.lockfile import LockFile # Serialize this so we don't get multiple processes trying to create # the database at the same time. 
- with LockFile(os.path.join(config.LOCK_DIR, 'dbcreate.lck')): - model.initialize() + with Lock(os.path.join(config.LOCK_DIR, 'dbcreate.lck')): + model.initialize(debug) self.list_manager = ListManager() self.user_manager = UserManager() self.message_store = MessageStore() @@ -72,3 +72,7 @@ class StockDatabase: def flush(self): objectstore.flush() + + def _reset(self): + model._reset() + diff --git a/Mailman/database/listmanager.py b/Mailman/database/listmanager.py index 0f6d7a9aa..46f0aa859 100644 --- a/Mailman/database/listmanager.py +++ b/Mailman/database/listmanager.py @@ -26,7 +26,7 @@ from Mailman import Errors from Mailman.Utils import split_listname, fqdn_listname from Mailman.configuration import config from Mailman.database.model import MailingList, Pendings -from Mailman.interfaces import IListManager, IPending +from Mailman.interfaces import IListManager @@ -63,5 +63,5 @@ class ListManager(object): @property def names(self): - for mlist in MailingList.select(): + for mlist in MailingList.query.filter_by().all(): yield fqdn_listname(mlist.list_name, mlist.host_name) diff --git a/Mailman/database/messagestore.py b/Mailman/database/messagestore.py index bbaa6976b..e0e6cd9f1 100644 --- a/Mailman/database/messagestore.py +++ b/Mailman/database/messagestore.py @@ -97,11 +97,11 @@ class MessageStore: return pickle.load(fp) def get_messages_by_message_id(self, message_id): - for msgrow in Message.select_by(message_id=message_id): + for msgrow in Message.query.filter_by(message_id=message_id): yield self._msgobj(msgrow) def get_messages_by_hash(self, hash): - for msgrow in Message.select_by(hash=hash): + for msgrow in Message.query.filter_by(hash=hash): yield self._msgobj(msgrow) def _getmsg(self, global_id): @@ -110,15 +110,15 @@ class MessageStore: seqno = int(seqno) except ValueError: return None - msgrows = Message.select_by(id=seqno) - if not msgrows: + messages = Message.query.filter_by(id=seqno) + if messages.count() == 0: return None - assert len(msgrows) == 1, 'Multiple id matches' - if msgrows[0].hash <> hash: + assert messages.count() == 1, 'Multiple id matches' + if messages[0].hash <> hash: # The client lied about which message they wanted. They gave a # valid sequence number, but the hash did not match. return None - return msgrows[0] + return messages[0] def get_message(self, global_id): msgrow = self._getmsg(global_id) @@ -126,7 +126,7 @@ class MessageStore: @property def messages(self): - for msgrow in Message.select(): + for msgrow in Message.query.filter_by().all(): yield self._msgobj(msgrow) def delete_message(self, global_id): diff --git a/Mailman/database/model/__init__.py b/Mailman/database/model/__init__.py index ed91fe018..86f79a84b 100644 --- a/Mailman/database/model/__init__.py +++ b/Mailman/database/model/__init__.py @@ -36,11 +36,15 @@ from urlparse import urlparse import Mailman.Version -elixir.delay_setup = True - from Mailman import constants from Mailman.Errors import SchemaVersionMismatchError from Mailman.configuration import config + +# This /must/ be set before any Elixir classes are defined (i.e. imported). +# This tells Elixir to use the short table names (i.e. the class name) instead +# of a mangled full class path. 
+elixir.options_defaults['shortnames'] = True + from Mailman.database.model.address import Address from Mailman.database.model.language import Language from Mailman.database.model.mailinglist import MailingList @@ -54,7 +58,7 @@ from Mailman.database.model.version import Version -def initialize(): +def initialize(debug): # Calculate the engine url url = Template(config.SQLALCHEMY_ENGINE_URL).safe_substitute(config.paths) # XXX By design of SQLite, database file creation does not honor @@ -72,16 +76,17 @@ def initialize(): # could have chmod'd the file after the fact, but half dozen and all... touch(url) engine = create_engine(url) - engine.echo = config.SQLALCHEMY_ECHO - elixir.metadata.connect(engine) + engine.echo = (config.SQLALCHEMY_ECHO if debug is None else debug) + elixir.metadata.bind = engine elixir.setup_all() + elixir.create_all() # Validate schema version. v = Version.get_by(component='schema') if not v: # Database has not yet been initialized v = Version(component='schema', version=Mailman.Version.DATABASE_SCHEMA_VERSION) - elixir.objectstore.flush() + elixir.session.flush() elif v.version <> Mailman.Version.DATABASE_SCHEMA_VERSION: # XXX Update schema raise SchemaVersionMismatchError(v.version) @@ -96,3 +101,9 @@ def touch(url): # Ignore errors if fd > 0: os.close(fd) + + +def _reset(): + for entity in elixir.entities: + for row in entity.query.filter_by().all(): + row.delete() diff --git a/Mailman/database/model/address.py b/Mailman/database/model/address.py index 391004413..3ba3c3dbf 100644 --- a/Mailman/database/model/address.py +++ b/Mailman/database/model/address.py @@ -32,16 +32,14 @@ USER_KIND = 'Mailman.database.model.user.User' class Address(Entity): implements(IAddress) - has_field('address', Unicode) - has_field('_original', Unicode) - has_field('real_name', Unicode) - has_field('verified_on', DateTime) - has_field('registered_on', DateTime) - # Relationships - belongs_to('user', of_kind=USER_KIND) - belongs_to('preferences', of_kind=PREFERENCE_KIND) - # Options - using_options(shortnames=True) + address = Field(Unicode) + _original = Field(Unicode) + real_name = Field(Unicode) + verified_on = Field(DateTime) + registered_on = Field(DateTime) + + user = ManyToOne(USER_KIND) + preferences = ManyToOne(PREFERENCE_KIND) def __init__(self, address, real_name): super(Address, self).__init__() diff --git a/Mailman/database/model/language.py b/Mailman/database/model/language.py index e065d5bad..ffdbd2cba 100644 --- a/Mailman/database/model/language.py +++ b/Mailman/database/model/language.py @@ -16,9 +16,13 @@ # USA. 
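
The `address.py` hunk above shows the declaration style the whole model package moves to: plain class attributes built from `Field()`, with `ManyToOne`/`OneToMany` replacing `belongs_to`/`has_many`, and the per-class `using_options(shortnames=True)` dropped in favour of the global default. A compressed sketch (the real code names relationship targets by full dotted path):

```python
from elixir import *

class User(Entity):
    # has_field('real_name', Unicode) becomes a plain class attribute...
    real_name = Field(Unicode)
    password = Field(Unicode)
    # ...and has_many/belongs_to become OneToMany/ManyToOne. The real code
    # uses dotted kinds such as 'Mailman.database.model.address.Address'.
    addresses = OneToMany('Address')

class Address(Entity):
    address = Field(Unicode)
    real_name = Field(Unicode)
    user = ManyToOne('User')
```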
from elixir import * +from zope.interface import implements +from Mailman.interfaces import ILanguage + + class Language(Entity): - has_field('code', Unicode) - # Options - using_options(shortnames=True) + implements(ILanguage) + + code = Field(Unicode) diff --git a/Mailman/database/model/mailinglist.py b/Mailman/database/model/mailinglist.py index fff5cbb9c..4057c2161 100644 --- a/Mailman/database/model/mailinglist.py +++ b/Mailman/database/model/mailinglist.py @@ -35,131 +35,129 @@ class MailingList(Entity): implements(IMailingList) # List identity - has_field('list_name', Unicode), - has_field('host_name', Unicode), + list_name = Field(Unicode) + host_name = Field(Unicode) # Attributes not directly modifiable via the web u/i - has_field('created_at', DateTime), - has_field('web_page_url', Unicode), - has_field('admin_member_chunksize', Integer), - has_field('hold_and_cmd_autoresponses', PickleType), + created_at = Field(DateTime) + web_page_url = Field(Unicode) + admin_member_chunksize = Field(Integer) + hold_and_cmd_autoresponses = Field(PickleType) # Attributes which are directly modifiable via the web u/i. The more # complicated attributes are currently stored as pickles, though that # will change as the schema and implementation is developed. - has_field('next_request_id', Integer), - has_field('next_digest_number', Integer), - has_field('admin_responses', PickleType), - has_field('postings_responses', PickleType), - has_field('request_responses', PickleType), - has_field('digest_last_sent_at', Float), - has_field('one_last_digest', PickleType), - has_field('volume', Integer), - has_field('last_post_time', DateTime), + next_request_id = Field(Integer) + next_digest_number = Field(Integer) + admin_responses = Field(PickleType) + postings_responses = Field(PickleType) + request_responses = Field(PickleType) + digest_last_sent_at = Field(Float) + one_last_digest = Field(PickleType) + volume = Field(Integer) + last_post_time = Field(DateTime) # Attributes which are directly modifiable via the web u/i. The more # complicated attributes are currently stored as pickles, though that # will change as the schema and implementation is developed. 
- has_field('accept_these_nonmembers', PickleType), - has_field('acceptable_aliases', PickleType), - has_field('admin_immed_notify', Boolean), - has_field('admin_notify_mchanges', Boolean), - has_field('administrivia', Boolean), - has_field('advertised', Boolean), - has_field('anonymous_list', Boolean), - has_field('archive', Boolean), - has_field('archive_private', Boolean), - has_field('archive_volume_frequency', Integer), - has_field('autorespond_admin', Boolean), - has_field('autorespond_postings', Boolean), - has_field('autorespond_requests', Integer), - has_field('autoresponse_admin_text', Unicode), - has_field('autoresponse_graceperiod', TimeDeltaType), - has_field('autoresponse_postings_text', Unicode), - has_field('autoresponse_request_text', Unicode), - has_field('ban_list', PickleType), - has_field('bounce_info_stale_after', TimeDeltaType), - has_field('bounce_matching_headers', Unicode), - has_field('bounce_notify_owner_on_disable', Boolean), - has_field('bounce_notify_owner_on_removal', Boolean), - has_field('bounce_processing', Boolean), - has_field('bounce_score_threshold', Integer), - has_field('bounce_unrecognized_goes_to_list_owner', Boolean), - has_field('bounce_you_are_disabled_warnings', Integer), - has_field('bounce_you_are_disabled_warnings_interval', TimeDeltaType), - has_field('collapse_alternatives', Boolean), - has_field('convert_html_to_plaintext', Boolean), - has_field('default_member_moderation', Boolean), - has_field('description', Unicode), - has_field('digest_footer', Unicode), - has_field('digest_header', Unicode), - has_field('digest_is_default', Boolean), - has_field('digest_send_periodic', Boolean), - has_field('digest_size_threshold', Integer), - has_field('digest_volume_frequency', Integer), - has_field('digestable', Boolean), - has_field('discard_these_nonmembers', PickleType), - has_field('emergency', Boolean), - has_field('encode_ascii_prefixes', Boolean), - has_field('filter_action', Integer), - has_field('filter_content', Boolean), - has_field('filter_filename_extensions', PickleType), - has_field('filter_mime_types', PickleType), - has_field('first_strip_reply_to', Boolean), - has_field('forward_auto_discards', Boolean), - has_field('gateway_to_mail', Boolean), - has_field('gateway_to_news', Boolean), - has_field('generic_nonmember_action', Integer), - has_field('goodbye_msg', Unicode), - has_field('header_filter_rules', PickleType), - has_field('hold_these_nonmembers', PickleType), - has_field('include_list_post_header', Boolean), - has_field('include_rfc2369_headers', Boolean), - has_field('info', Unicode), - has_field('linked_newsgroup', Unicode), - has_field('max_days_to_hold', Integer), - has_field('max_message_size', Integer), - has_field('max_num_recipients', Integer), - has_field('member_moderation_action', Boolean), - has_field('member_moderation_notice', Unicode), - has_field('mime_is_default_digest', Boolean), - has_field('moderator_password', Unicode), - has_field('msg_footer', Unicode), - has_field('msg_header', Unicode), - has_field('new_member_options', Integer), - has_field('news_moderation', EnumType), - has_field('news_prefix_subject_too', Boolean), - has_field('nntp_host', Unicode), - has_field('nondigestable', Boolean), - has_field('nonmember_rejection_notice', Unicode), - has_field('obscure_addresses', Boolean), - has_field('pass_filename_extensions', PickleType), - has_field('pass_mime_types', PickleType), - has_field('personalize', EnumType), - has_field('post_id', Integer), - has_field('preferred_language', Unicode), - 
has_field('private_roster', Boolean), - has_field('real_name', Unicode), - has_field('reject_these_nonmembers', PickleType), - has_field('reply_goes_to_list', EnumType), - has_field('reply_to_address', Unicode), - has_field('require_explicit_destination', Boolean), - has_field('respond_to_post_requests', Boolean), - has_field('scrub_nondigest', Boolean), - has_field('send_goodbye_msg', Boolean), - has_field('send_reminders', Boolean), - has_field('send_welcome_msg', Boolean), - has_field('subject_prefix', Unicode), - has_field('subscribe_auto_approval', PickleType), - has_field('subscribe_policy', Integer), - has_field('topics', PickleType), - has_field('topics_bodylines_limit', Integer), - has_field('topics_enabled', Boolean), - has_field('unsubscribe_policy', Integer), - has_field('welcome_msg', Unicode), + accept_these_nonmembers = Field(PickleType) + acceptable_aliases = Field(PickleType) + admin_immed_notify = Field(Boolean) + admin_notify_mchanges = Field(Boolean) + administrivia = Field(Boolean) + advertised = Field(Boolean) + anonymous_list = Field(Boolean) + archive = Field(Boolean) + archive_private = Field(Boolean) + archive_volume_frequency = Field(Integer) + autorespond_admin = Field(Boolean) + autorespond_postings = Field(Boolean) + autorespond_requests = Field(Integer) + autoresponse_admin_text = Field(Unicode) + autoresponse_graceperiod = Field(TimeDeltaType) + autoresponse_postings_text = Field(Unicode) + autoresponse_request_text = Field(Unicode) + ban_list = Field(PickleType) + bounce_info_stale_after = Field(TimeDeltaType) + bounce_matching_headers = Field(Unicode) + bounce_notify_owner_on_disable = Field(Boolean) + bounce_notify_owner_on_removal = Field(Boolean) + bounce_processing = Field(Boolean) + bounce_score_threshold = Field(Integer) + bounce_unrecognized_goes_to_list_owner = Field(Boolean) + bounce_you_are_disabled_warnings = Field(Integer) + bounce_you_are_disabled_warnings_interval = Field(TimeDeltaType) + collapse_alternatives = Field(Boolean) + convert_html_to_plaintext = Field(Boolean) + default_member_moderation = Field(Boolean) + description = Field(Unicode) + digest_footer = Field(Unicode) + digest_header = Field(Unicode) + digest_is_default = Field(Boolean) + digest_send_periodic = Field(Boolean) + digest_size_threshold = Field(Integer) + digest_volume_frequency = Field(Integer) + digestable = Field(Boolean) + discard_these_nonmembers = Field(PickleType) + emergency = Field(Boolean) + encode_ascii_prefixes = Field(Boolean) + filter_action = Field(Integer) + filter_content = Field(Boolean) + filter_filename_extensions = Field(PickleType) + filter_mime_types = Field(PickleType) + first_strip_reply_to = Field(Boolean) + forward_auto_discards = Field(Boolean) + gateway_to_mail = Field(Boolean) + gateway_to_news = Field(Boolean) + generic_nonmember_action = Field(Integer) + goodbye_msg = Field(Unicode) + header_filter_rules = Field(PickleType) + hold_these_nonmembers = Field(PickleType) + include_list_post_header = Field(Boolean) + include_rfc2369_headers = Field(Boolean) + info = Field(Unicode) + linked_newsgroup = Field(Unicode) + max_days_to_hold = Field(Integer) + max_message_size = Field(Integer) + max_num_recipients = Field(Integer) + member_moderation_action = Field(Boolean) + member_moderation_notice = Field(Unicode) + mime_is_default_digest = Field(Boolean) + moderator_password = Field(Unicode) + msg_footer = Field(Unicode) + msg_header = Field(Unicode) + new_member_options = Field(Integer) + news_moderation = Field(EnumType) + 
news_prefix_subject_too = Field(Boolean) + nntp_host = Field(Unicode) + nondigestable = Field(Boolean) + nonmember_rejection_notice = Field(Unicode) + obscure_addresses = Field(Boolean) + pass_filename_extensions = Field(PickleType) + pass_mime_types = Field(PickleType) + personalize = Field(EnumType) + post_id = Field(Integer) + preferred_language = Field(Unicode) + private_roster = Field(Boolean) + real_name = Field(Unicode) + reject_these_nonmembers = Field(PickleType) + reply_goes_to_list = Field(EnumType) + reply_to_address = Field(Unicode) + require_explicit_destination = Field(Boolean) + respond_to_post_requests = Field(Boolean) + scrub_nondigest = Field(Boolean) + send_goodbye_msg = Field(Boolean) + send_reminders = Field(Boolean) + send_welcome_msg = Field(Boolean) + subject_prefix = Field(Unicode) + subscribe_auto_approval = Field(PickleType) + subscribe_policy = Field(Integer) + topics = Field(PickleType) + topics_bodylines_limit = Field(Integer) + topics_enabled = Field(Boolean) + unsubscribe_policy = Field(Integer) + welcome_msg = Field(Unicode) # Relationships ## has_and_belongs_to_many( ## 'available_languages', ## of_kind='Mailman.database.model.languages.Language') - # Options - using_options(shortnames=True) def __init__(self, fqdn_listname): super(MailingList, self).__init__() diff --git a/Mailman/database/model/member.py b/Mailman/database/model/member.py index 1dc942323..4f353a06c 100644 --- a/Mailman/database/model/member.py +++ b/Mailman/database/model/member.py @@ -32,13 +32,11 @@ PREFERENCE_KIND = 'Mailman.database.model.preferences.Preferences' class Member(Entity): implements(IMember) - has_field('role', EnumType) - has_field('mailing_list', Unicode) + role = Field(EnumType) + mailing_list = Field(Unicode) # Relationships - belongs_to('address', of_kind=ADDRESS_KIND) - belongs_to('preferences', of_kind=PREFERENCE_KIND) - # Options - using_options(shortnames=True) + address = ManyToOne(ADDRESS_KIND) + preferences = ManyToOne(PREFERENCE_KIND) def __repr__(self): return '<Member: %s on %s as %s>' % ( diff --git a/Mailman/database/model/message.py b/Mailman/database/model/message.py index df8371c6a..eb4b4616d 100644 --- a/Mailman/database/model/message.py +++ b/Mailman/database/model/message.py @@ -18,13 +18,15 @@ from elixir import * from zope.interface import implements +from Mailman.interfaces import IMessage + class Message(Entity): """A message in the message store.""" - has_field('hash', Unicode) - has_field('path', Unicode) - has_field('message_id', Unicode) + implements(IMessage) - using_options(shortnames=True) + hash = Field(Unicode) + path = Field(Unicode) + message_id = Field(Unicode) diff --git a/Mailman/database/model/pending.py b/Mailman/database/model/pending.py index ae2ad3d60..75bb59d3c 100644 --- a/Mailman/database/model/pending.py +++ b/Mailman/database/model/pending.py @@ -27,30 +27,30 @@ from zope.interface import implements from zope.interface.verify import verifyObject from Mailman.configuration import config -from Mailman.interfaces import IPending, IPendable +from Mailman.interfaces import ( + IPendings, IPendable, IPendedKeyValue, IPended) -PEND_KIND = 'Mailman.database.model.pending.Pending' +PEND_KIND = 'Mailman.database.model.pending.Pended' class PendedKeyValue(Entity): """A pended key/value pair, tied to a token.""" - has_field('key', Unicode) - has_field('value', Unicode) - # Relationships - belongs_to('pended', of_kind=PEND_KIND) - # Options - using_options(shortnames=True) + implements(IPendedKeyValue) + key = Field(Unicode) + value 
= Field(Unicode) + pended = ManyToOne(PEND_KIND) -class Pending(Entity): + +class Pended(Entity): """A pended event, tied to a token.""" - has_field('token', Unicode) - has_field('expiration_date', DateTime) - # Options - using_options(shortnames=True) + implements(IPended) + + token = Field(Unicode) + expiration_date = Field(DateTime) @@ -62,7 +62,7 @@ class UnpendedPendable(dict): class Pendings(object): """Implementation of the IPending interface.""" - implements(IPending) + implements(IPendings) def add(self, pendable, lifetime=None): verifyObject(IPendable, pendable) @@ -75,17 +75,19 @@ class Pendings(object): # clock values basically help obscure the random number generator, as # does the hash calculation. The integral parts of the time values # are discarded because they're the most predictable bits. - while True: + for attempts in range(3): now = time.time() x = random.random() + now % 1.0 + time.clock() % 1.0 # Use sha1 because it produces shorter strings. token = hashlib.sha1(repr(x)).hexdigest() # In practice, we'll never get a duplicate, but we'll be anal # about checking anyway. - if not Pending.select_by(token=token): + if Pended.query.filter_by(token=token).count() == 0: break + else: + raise AssertionError('Could not find a valid pendings token') # Create the record, and then the individual key/value pairs. - pending = Pending( + pending = Pended( token=token, expiration_date=datetime.datetime.now() + lifetime) for key, value in pendable.items(): @@ -93,17 +95,18 @@ class Pendings(object): return token def confirm(self, token, expunge=True): - pendings = Pending.select_by(token=token) - assert 0 <= len(pendings) <= 1, 'Unexpected token search results' - if len(pendings) == 0: + pendings = Pended.query.filter_by(token=token) + if pendings.count() == 0: return None + assert pendings.count() == 1, ( + 'Unexpected token count: %d' % pendings.count()) pending = pendings[0] pendable = UnpendedPendable() # Find all PendedKeyValue entries that are associated with the pending # object's ID. - q = PendedKeyValue.filter( - PendedKeyValue.c.pended_id == Pending.c.id).filter( - Pending.c.id == pending.id) + q = PendedKeyValue.query.filter( + PendedKeyValue.c.pended_id == Pended.c.id).filter( + Pended.c.id == pending.id) for keyvalue in q.all(): pendable[keyvalue.key] = keyvalue.value if expunge: @@ -114,13 +117,13 @@ class Pendings(object): def evict(self): now = datetime.datetime.now() - for pending in Pending.select(): + for pending in Pended.query.filter_by().all(): if pending.expiration_date < now: # Find all PendedKeyValue entries that are associated with the # pending object's ID. 
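
`Pendings.add()` above replaces its unbounded `while True` token search with a bounded `for`/`else` loop. A self-contained sketch of that loop; the `set` stands in for the `Pended.query.filter_by(...).count()` duplicate check in the real code:

```python
import time
import random
import hashlib

_issued = set()    # stand-in for the Pended table

def new_token(max_attempts=3):
    for attempt in range(max_attempts):
        now = time.time()
        # The fractional clock values obscure the RNG a little, as the
        # original comment explains; the integral parts are too predictable.
        x = random.random() + now % 1.0 + time.clock() % 1.0
        # sha1 keeps the token short enough to embed in confirmation URLs.
        token = hashlib.sha1(repr(x)).hexdigest()
        if token not in _issued:
            break
    else:
        raise AssertionError('Could not find a valid pendings token')
    _issued.add(token)
    return token
```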
- q = PendedKeyValue.filter( - PendedKeyValue.c.pended_id == Pending.c.id).filter( - Pending.c.id == pending.id) + q = PendedKeyValue.query.filter( + PendedKeyValue.c.pended_id == Pended.c.id).filter( + Pended.c.id == pending.id) for keyvalue in q: keyvalue.delete() pending.delete() diff --git a/Mailman/database/model/preferences.py b/Mailman/database/model/preferences.py index 07d4d84e2..8cbb77e6a 100644 --- a/Mailman/database/model/preferences.py +++ b/Mailman/database/model/preferences.py @@ -31,15 +31,13 @@ USER_KIND = 'Mailman.database.model.user.User' class Preferences(Entity): implements(IPreferences) - has_field('acknowledge_posts', Boolean) - has_field('hide_address', Boolean) - has_field('preferred_language', Unicode) - has_field('receive_list_copy', Boolean) - has_field('receive_own_postings', Boolean) - has_field('delivery_mode', EnumType) - has_field('delivery_status', EnumType) - # Options - using_options(shortnames=True) + acknowledge_posts = Field(Boolean) + hide_address = Field(Boolean) + preferred_language = Field(Unicode) + receive_list_copy = Field(Boolean) + receive_own_postings = Field(Boolean) + delivery_mode = Field(EnumType) + delivery_status = Field(EnumType) def __repr__(self): return '<Preferences object at %#x>' % id(self) diff --git a/Mailman/database/model/requests.py b/Mailman/database/model/requests.py index ea917c2b9..037483c1a 100644 --- a/Mailman/database/model/requests.py +++ b/Mailman/database/model/requests.py @@ -49,23 +49,21 @@ class ListRequests: @property def count(self): - results = _Request.select_by(mailing_list=self.mailing_list) - return len(results) + return _Request.query.filter_by(mailing_list=self.mailing_list).count() def count_of(self, request_type): - results = _Request.select_by(mailing_list=self.mailing_list, - type=request_type) - return len(results) + return _Request.query.filter_by(mailing_list=self.mailing_list, + type=request_type).count() @property def held_requests(self): - results = _Request.select_by(mailing_list=self.mailing_list) + results = _Request.query.filter_by(mailing_list=self.mailing_list) for request in results: yield request def of_type(self, request_type): - results = _Request.select_by(mailing_list=self.mailing_list, - type=request_type) + results = _Request.query.filter_by(mailing_list=self.mailing_list, + type=request_type) for request in results: yield request @@ -132,10 +130,8 @@ class Requests: class _Request(Entity): """Table for mailing list hold requests.""" - has_field('key', Unicode) - has_field('type', EnumType) - has_field('data_hash', Unicode) + key = Field(Unicode) + type = Field(EnumType) + data_hash = Field(Unicode) # Relationships - belongs_to('mailing_list', of_kind=MAILINGLIST_KIND) - # Options - using_options(shortnames=True) + mailing_list = ManyToOne(MAILINGLIST_KIND) diff --git a/Mailman/database/model/roster.py b/Mailman/database/model/roster.py index e59bc8b17..c8fa86d58 100644 --- a/Mailman/database/model/roster.py +++ b/Mailman/database/model/roster.py @@ -49,8 +49,9 @@ class AbstractRoster(object): @property def members(self): - for member in Member.select_by(mailing_list=self._mlist.fqdn_listname, - role=self.role): + for member in Member.query.filter_by( + mailing_list=self._mlist.fqdn_listname, + role=self.role): yield member @property @@ -72,18 +73,18 @@ class AbstractRoster(object): yield member.address def get_member(self, address): - results = Member.select( + results = Member.query.filter( and_(Member.c.mailing_list == self._mlist.fqdn_listname, Member.c.role == self.role, 
Address.c.address == address, Member.c.address_id == Address.c.id)) - if len(results) == 0: + if results.count() == 0: return None - elif len(results) == 1: + elif results.count() == 1: return results[0] else: - assert len(results) <= 1, ( - 'Too many matching member results: %s' % results) + raise AssertionError('Too many matching member results: %s' % + results.count()) @@ -120,7 +121,7 @@ class AdministratorRoster(AbstractRoster): def members(self): # Administrators are defined as the union of the owners and the # moderators. - members = Member.select( + members = Member.query.filter( and_(Member.c.mailing_list == self._mlist.fqdn_listname, or_(Member.c.role == MemberRole.owner, Member.c.role == MemberRole.moderator))) @@ -128,18 +129,18 @@ class AdministratorRoster(AbstractRoster): yield member def get_member(self, address): - results = Member.select( + results = Member.query.filter( and_(Member.c.mailing_list == self._mlist.fqdn_listname, or_(Member.c.role == MemberRole.moderator, Member.c.role == MemberRole.owner), Address.c.address == address, Member.c.address_id == Address.c.id)) - if len(results) == 0: + if results.count() == 0: return None - elif len(results) == 1: + elif results.count() == 1: return results[0] else: - assert len(results) <= 1, ( + raise AssertionError( 'Too many matching member results: %s' % results) @@ -154,8 +155,9 @@ class RegularMemberRoster(AbstractRoster): # Query for all the Members which have a role of MemberRole.member and # are subscribed to this mailing list. Then return only those members # that have a regular delivery mode. - for member in Member.select_by(mailing_list=self._mlist.fqdn_listname, - role=MemberRole.member): + for member in Member.query.filter_by( + mailing_list=self._mlist.fqdn_listname, + role=MemberRole.member): if member.delivery_mode == DeliveryMode.regular: yield member @@ -179,8 +181,9 @@ class DigestMemberRoster(AbstractRoster): # Query for all the Members which have a role of MemberRole.member and # are subscribed to this mailing list. Then return only those members # that have one of the digest delivery modes. 
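
The roster and user-manager hunks in this area all converge on the same result-handling idiom once `select()`/`select_by()` lists give way to query objects: inspect `count()` and handle zero, one, and many explicitly. A sketch that runs without a database by faking the query with a list:

```python
class FakeQuery(list):
    """List pretending to be a query result, so the sketch needs no database."""

    def count(self):
        return len(self)

def get_one_or_none(results):
    if results.count() == 0:
        return None
    elif results.count() == 1:
        return results[0]
    else:
        raise AssertionError(
            'Too many matching member results: %s' % results.count())

assert get_one_or_none(FakeQuery([])) is None
assert get_one_or_none(FakeQuery(['aperson@example.com'])) == 'aperson@example.com'
```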
- for member in Member.select_by(mailing_list=self._mlist.fqdn_listname, - role=MemberRole.member): + for member in Member.query.filter_by( + mailing_list=self._mlist.fqdn_listname, + role=MemberRole.member): if member.delivery_mode in _digest_modes: yield member @@ -193,5 +196,6 @@ class Subscribers(AbstractRoster): @property def members(self): - for member in Member.select_by(mailing_list=self._mlist.fqdn_listname): + for member in Member.query.filter_by( + mailing_list=self._mlist.fqdn_listname): yield member diff --git a/Mailman/database/model/user.py b/Mailman/database/model/user.py index 683ec0f90..895beef9f 100644 --- a/Mailman/database/model/user.py +++ b/Mailman/database/model/user.py @@ -32,13 +32,11 @@ PREFERENCE_KIND = 'Mailman.database.model.preferences.Preferences' class User(Entity): implements(IUser) - has_field('real_name', Unicode) - has_field('password', Unicode) - # Relationships - has_many('addresses', of_kind=ADDRESS_KIND) - belongs_to('preferences', of_kind=PREFERENCE_KIND) - # Options - using_options(shortnames=True) + real_name = Field(Unicode) + password = Field(Unicode) + + addresses = OneToMany(ADDRESS_KIND) + preferences = ManyToOne(PREFERENCE_KIND) def __repr__(self): return '<User "%s" at %#x>' % (self.real_name, id(self)) @@ -47,13 +45,11 @@ class User(Entity): if address.user is not None: raise Errors.AddressAlreadyLinkedError(address) address.user = self - self.addresses.append(address) def unlink(self, address): if address.user is None: raise Errors.AddressNotLinkedError(address) address.user = None - self.addresses.remove(address) def controls(self, address): found = Address.get_by(address=address) diff --git a/Mailman/database/model/version.py b/Mailman/database/model/version.py index 7b12778ce..dbbf5b8c1 100644 --- a/Mailman/database/model/version.py +++ b/Mailman/database/model/version.py @@ -19,7 +19,5 @@ from elixir import * class Version(Entity): - has_field('component', Unicode) - has_field('version', Integer) - # Options - using_options(shortnames=True) + component = Field(Unicode) + version = Field(Integer) diff --git a/Mailman/database/usermanager.py b/Mailman/database/usermanager.py index 1958080fd..6bc2ed53d 100644 --- a/Mailman/database/usermanager.py +++ b/Mailman/database/usermanager.py @@ -36,7 +36,7 @@ class UserManager(object): def create_user(self, address=None, real_name=None): user = User() - user.real_name = (real_name if real_name is not None else '') + user.real_name = ('' if real_name is None else real_name) if address: addrobj = Address(address, user.real_name) addrobj.preferences = Preferences() @@ -49,19 +49,28 @@ class UserManager(object): @property def users(self): - for user in User.select(): + for user in User.query.filter_by().all(): yield user def get_user(self, address): - found = Address.get_by(address=address.lower()) - return found and found.user + addresses = Address.query.filter_by(address=address.lower()) + if addresses.count() == 0: + return None + elif addresses.count() == 1: + return addresses[0].user + else: + raise AssertionError('Unexpected query count') def create_address(self, address, real_name=None): - found = Address.get_by(address=address.lower()) - if found: + addresses = Address.query.filter_by(address=address.lower()) + if addresses.count() == 1: + found = addresses[0] raise Errors.ExistingAddressError(found.original_address) + assert addresses.count() == 0, 'Unexpected results' if real_name is None: real_name = '' + # It's okay not to lower case the 'address' argument because the + # constructor 
will do the right thing. address = Address(address, real_name) address.preferences = Preferences() return address @@ -74,9 +83,15 @@ class UserManager(object): address.delete() def get_address(self, address): - return Address.get_by(address=address.lower()) + addresses = Address.query.filter_by(address=address.lower()) + if addresses.count() == 0: + return None + elif addresses.count() == 1: + return addresses[0] + else: + raise AssertionError('Unexpected query count') @property def addresses(self): - for address in Address.select(): + for address in Address.query.filter_by().all(): yield address diff --git a/Mailman/docs/addresses.txt b/Mailman/docs/addresses.txt index 4d4157f83..4dd8b44ad 100644 --- a/Mailman/docs/addresses.txt +++ b/Mailman/docs/addresses.txt @@ -68,6 +68,8 @@ You can create email addresses that are linked to users by using a different interface. >>> user_1 = usermgr.create_user('cperson@example.com', 'Claire Person') + >>> sorted(address.address for address in user_1.addresses) + ['cperson@example.com'] >>> flush() >>> sorted(address.address for address in usermgr.addresses) ['aperson@example.com', 'bperson@example.com', 'cperson@example.com'] @@ -87,7 +89,7 @@ And now you can find the associated user. Deleting addresses ------------------ -You can remove an unlinked address from the usre manager. +You can remove an unlinked address from the user manager. >>> usermgr.delete_address(address_1) >>> flush() diff --git a/Mailman/docs/pending.txt b/Mailman/docs/pending.txt index de6326219..d93da5283 100644 --- a/Mailman/docs/pending.txt +++ b/Mailman/docs/pending.txt @@ -8,20 +8,21 @@ This is not where messages held for administrator approval are kept. >>> from Mailman.configuration import config >>> from Mailman.database import flush - >>> from Mailman.interfaces import IPendable, IPending >>> from zope.interface import implements >>> from zope.interface.verify import verifyObject In order to pend an event, you first need a pending database, which is available by adapting the list manager. + >>> from Mailman.interfaces import IPendings >>> pendingdb = config.db.pendings - >>> verifyObject(IPending, pendingdb) + >>> verifyObject(IPendings, pendingdb) True The pending database can add any IPendable to the database, returning a token that can be used in urls and such. + >>> from Mailman.interfaces import IPendable >>> class SimplePendable(dict): ... implements(IPendable) >>> subscription = SimplePendable( diff --git a/Mailman/docs/requests.txt b/Mailman/docs/requests.txt index cf11c3653..914ce7dfb 100644 --- a/Mailman/docs/requests.txt +++ b/Mailman/docs/requests.txt @@ -124,7 +124,7 @@ originally held. >>> key, data = requests.get_request(2) >>> key - 'hold_2' + u'hold_2' Because we did not store additional data with request 2, it comes back as None now. @@ -179,7 +179,7 @@ database. 1 RequestType.held_message hold_1 None 3 RequestType.unsubscription hold_3 None 4 RequestType.held_message hold_4 None - 5 RequestType.held_message hold_5 [('bar', 'no'), ('foo', 'yes')] + 5 RequestType.held_message hold_5 [(u'bar', u'no'), (u'foo', u'yes')] >>> print requests.get_request(2) None @@ -818,7 +818,7 @@ and the person remains a member of the mailing list. 
[('_parsemsg', False), ('listname', 'alist@example.com'), ('nodecorate', True), ('received_time', ...), - ('recips', ['hperson@example.com']), + ('recips', [u'hperson@example.com']), ('reduced_list_headers', True), ('version', 3)] >>> mlist.members.get_member('hperson@example.com') <Member: hperson@example.com on alist@example.com as MemberRole.member> diff --git a/Mailman/initialize.py b/Mailman/initialize.py index b17e3a9e2..0993acf3d 100644 --- a/Mailman/initialize.py +++ b/Mailman/initialize.py @@ -57,13 +57,13 @@ def initialize_1(config_path, propagate_logs): Mailman.loginit.initialize(propagate_logs) -def initialize_2(): +def initialize_2(debug=False): database_plugin = get_plugin('mailman.database') # Instantiate the database plugin, ensure that it's of the right type, and # initialize it. Then stash the object on our configuration object. database = database_plugin() verifyObject(IDatabase, database) - database.initialize() + database.initialize(debug) Mailman.configuration.config.db = database diff --git a/Mailman/interfaces/database.py b/Mailman/interfaces/database.py index 23405b9e8..f4dd693a4 100644 --- a/Mailman/interfaces/database.py +++ b/Mailman/interfaces/database.py @@ -30,12 +30,24 @@ from zope.interface import Interface, Attribute class IDatabase(Interface): """Database layer interface.""" - def initialize(): - """Initialize the database layer, using whatever means necessary.""" + def initialize(debug=None): + """Initialize the database layer, using whatever means necessary. + + :param debug: When None (the default), the configuration file + determines whether the database layer should have increased + debugging or not. When True or False, this overrides the + configuration file setting. + """ def flush(): """Flush current database changes.""" + def _reset(): + """Reset the database to its pristine state. + + This is only used by the test framework. + """ + # XXX Eventually we probably need to support a transaction manager # interface, e.g. begin(), commit(), abort(). We will probably also need # to support a shutdown() method for cleanly disconnecting from the diff --git a/Mailman/interfaces/languages.py b/Mailman/interfaces/languages.py index b9ddd8c90..84663cd89 100644 --- a/Mailman/interfaces/languages.py +++ b/Mailman/interfaces/languages.py @@ -71,3 +71,10 @@ class ILanguageManager(Interface): enabled_names = Attribute( """An iterator over all enabled language names.""") + + + +class ILanguage(Interface): + """The representation of a language.""" + + code = Attribute("""The 2-character language code.""") diff --git a/Mailman/interfaces/messagestore.py b/Mailman/interfaces/messages.py index 541238fd1..9fac98d76 100644 --- a/Mailman/interfaces/messagestore.py +++ b/Mailman/interfaces/messages.py @@ -99,3 +99,14 @@ class IMessageStore(Interface): messages = Attribute( """An iterator over all messages in this message store.""") + + + +class IMessage(Interface): + """The representation of an email message.""" + + hash = Attribute("""The unique SHA1 hash of the message.""") + + path = Attribute("""The filesystem path to the message object.""") + + message_id = Attribute("""The message's Message-ID header.""") diff --git a/Mailman/interfaces/pending.py b/Mailman/interfaces/pending.py index 68a4c41de..22d18a07c 100644 --- a/Mailman/interfaces/pending.py +++ b/Mailman/interfaces/pending.py @@ -40,10 +40,28 @@ class IPendable(Interface): Both the keys and values must be strings. 
""" - + + + +class IPended(Interface): + """A pended event, tied to a token.""" + + token = Attribute("""The pended token.""") + + expiration_date = Attribute("""The expiration date of the pended event.""") + + + +class IPendedKeyValue(Interface): + """A pended key/value pair.""" + + key = Attribute("""The pended key.""") + + value = Attribute("""The pended value.""") + -class IPending(Interface): +class IPendings(Interface): """Interface to pending database.""" def add(pendable, lifetime=None): diff --git a/Mailman/lockfile.py b/Mailman/lockfile.py deleted file mode 100644 index 7db746952..000000000 --- a/Mailman/lockfile.py +++ /dev/null @@ -1,583 +0,0 @@ -# Copyright (C) 1998-2007 by the Free Software Foundation, Inc. -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, -# USA. - -"""Portable, NFS-safe file locking with timeouts. - -This code implements an NFS-safe file-based locking algorithm influenced by -the GNU/Linux open(2) manpage, under the description of the O_EXCL option. -From RH6.1: - - [...] O_EXCL is broken on NFS file systems, programs which rely on it - for performing locking tasks will contain a race condition. The - solution for performing atomic file locking using a lockfile is to - create a unique file on the same fs (e.g., incorporating hostname and - pid), use link(2) to make a link to the lockfile. If link() returns - 0, the lock is successful. Otherwise, use stat(2) on the unique file - to check if its link count has increased to 2, in which case the lock - is also successful. - -The assumption made here is that there will be no `outside interference', -e.g. no agent external to this code will have access to link() to the affected -lock files. - -LockFile objects support lock-breaking so that you can't wedge a process -forever. This is especially helpful in a web environment, but may not be -appropriate for all applications. - -Locks have a `lifetime', which is the maximum length of time the process -expects to retain the lock. It is important to pick a good number here -because other processes will not break an existing lock until the expected -lifetime has expired. Too long and other processes will hang; too short and -you'll end up trampling on existing process locks -- and possibly corrupting -data. In a distributed (NFS) environment, you also need to make sure that -your clocks are properly synchronized. -""" - -__metaclass__ = type -__all__ = [ - 'LockError', - 'AlreadyLockedError', - 'NotLockedError', - 'LockFile', - ] - -# This code has undergone several revisions, with contributions from Barry -# Warsaw, Thomas Wouters, Harald Meland, and John Viega. It should also work -# well outside of Mailman so it could be used for other Python projects -# requiring file locking. See the __main__ section at the bottom of the file -# for unit testing. 
- -import os -import time -import errno -import random -import socket -import logging -import datetime -import traceback - -# Units are floating-point seconds. -DEFAULT_LOCK_LIFETIME = datetime.timedelta(seconds=15) -# Allowable a bit of clock skew, in seconds. -CLOCK_SLOP = 10 -# This is appropriate for Mailman, but you may want to change this if you're -# using this code outside Mailman. -log = logging.getLogger('mailman.locks') - - - -# Exceptions that can be raised by this module -class LockError(Exception): - """Base class for all exceptions in this module.""" - -class AlreadyLockedError(LockError): - """An attempt is made to lock an already locked object.""" - -class NotLockedError(LockError): - """An attempt is made to unlock an object that isn't locked.""" - -class TimeOutError(LockError): - """The timeout interval elapsed before the lock succeeded.""" - - - -class LockFile: - """A portable way to lock resources by way of the file system. - - This class supports the following methods: - - __init__(lockfile[, lifetime]): - Create the resource lock using lockfile as the global lock file. Each - process laying claim to this resource lock will create their own - temporary lock files based on the path specified by lockfile. - Optional lifetime is a timedelta specifying the number of seconds the - process expects to hold the lock. - - set_lifetime(lifetime): - Set a new lock lifetime. This takes affect the next time the file is - locked, but does not refresh a locked file. - - get_lifetime(): - Return the lock's lifetime. - - refresh([newlifetime[, unconditionally]]): - Refreshes the lifetime of a locked file. Use this if you realize that - you need to keep a resource locked longer than you thought. With - optional newlifetime, set the lock's lifetime. Raises NotLockedError - if the lock is not set, unless optional unconditionally flag is set to - true. - - lock([timeout]): - Acquire the lock. This blocks until the lock is acquired unless - optional timeout is greater than 0, in which case, a TimeOutError is - raised when timeout number of seconds (or possibly more) expires - without lock acquisition. Raises AlreadyLockedError if the lock is - already set. - - unlock([unconditionally]): - Relinquishes the lock. Raises a NotLockedError if the lock is not - set, unless optional unconditionally is true. - - locked(): - Return true if the lock is set, otherwise false. To avoid race - conditions, this refreshes the lock (on set locks). - - """ - # XXX We need to watch out for two lock objects in the same process - # pointing to the same lock file. Without this, if you lock lf1 and do - # not lock lf2, lf2.locked() will still return true. NOTE: this gimmick - # probably does /not/ work in a multithreaded world, but we don't have to - # worry about that, do we? <1 wink>. - COUNTER = 0 - - def __init__(self, lockfile, lifetime=DEFAULT_LOCK_LIFETIME): - """Create the resource lock using lockfile as the global lock file. - - Each process laying claim to this resource lock will create their own - temporary lock files based on the path specified by lockfile. - Optional lifetime is the number of seconds the process expects to hold - the lock. Optional withlogging, when true, turns on lockfile logging - (see the module docstring for details). 
- """ - self._lockfile = lockfile - self._lifetime = lifetime - # This works because we know we're single threaded - self._counter = LockFile.COUNTER - LockFile.COUNTER += 1 - self._tmpfname = '%s.%s.%d.%d' % ( - lockfile, socket.gethostname(), os.getpid(), self._counter) - # For transferring ownership across a fork. - self._owned = True - - def __repr__(self): - return '<LockFile %s: %s [%s: %s] pid=%s>' % ( - id(self), self._lockfile, - self.locked() and 'locked' or 'unlocked', - self._lifetime, os.getpid()) - - def set_lifetime(self, lifetime): - """Set a new lock lifetime. - - This takes affect the next time the file is locked, but does not - refresh a locked file. - """ - self._lifetime = lifetime - - def get_lifetime(self): - """Return the lock's lifetime.""" - return self._lifetime - - def refresh(self, newlifetime=None, unconditionally=False): - """Refreshes the lifetime of a locked file. - - Use this if you realize that you need to keep a resource locked longer - than you thought. With optional newlifetime, set the lock's lifetime. - Raises NotLockedError if the lock is not set, unless optional - unconditionally flag is set to true. - """ - if newlifetime is not None: - self.set_lifetime(newlifetime) - # Do we have the lock? As a side effect, this refreshes the lock! - if not self.locked() and not unconditionally: - raise NotLockedError('%s: %s' % (repr(self), self._read())) - - def lock(self, timeout=0): - """Acquire the lock. - - This blocks until the lock is acquired unless optional timeout is - greater than 0, in which case, a TimeOutError is raised when timeout - number of seconds (or possibly more) expires without lock acquisition. - Raises AlreadyLockedError if the lock is already set. - """ - if timeout: - timeout_time = time.time() + timeout - # Make sure my temp lockfile exists, and that its contents are - # up-to-date (e.g. the temp file name, and the lock lifetime). - self._write() - # XXX This next call can fail with an EPERM. I have no idea why, but - # I'm nervous about wrapping this in a try/except. It seems to be a - # very rare occurence, only happens from cron, and (only?) on Solaris - # 2.6. - self._touch() - log.debug('laying claim: %s', self._lockfile) - # for quieting the logging output - loopcount = -1 - while True: - loopcount += 1 - # Create the hard link and test for exactly 2 links to the file - try: - os.link(self._tmpfname, self._lockfile) - # If we got here, we know we know we got the lock, and never - # had it before, so we're done. Just touch it again for the - # fun of it. - log.debug('got the lock: %s', self._lockfile) - self._touch() - break - except OSError, e: - # The link failed for some reason, possibly because someone - # else already has the lock (i.e. we got an EEXIST), or for - # some other bizarre reason. - if e.errno == errno.ENOENT: - # XXX in some Linux environments, it is possible to get - # an ENOENT, which is truly strange, because this means - # that self._tmpfname doesn't exist at the time of the - # os.link(), but self._write() is supposed to guarantee - # that this happens! I don't honestly know why this - # happens, but for now we just say we didn't acquire the - # lock, and try again next time. - pass - elif e.errno <> errno.EEXIST: - # Something very bizarre happened. Clean up our state and - # pass the error on up. - log.exception('unexpected link') - os.unlink(self._tmpfname) - raise - elif self._linkcount() <> 2: - # Somebody's messin' with us! Log this, and try again - # later. XXX should we raise an exception? 
- log.error('unexpected linkcount: %d', self._linkcount()) - elif self._read() == self._tmpfname: - # It was us that already had the link. - log.debug('already locked: %s', self._lockfile) - raise AlreadyLockedError - # otherwise, someone else has the lock - pass - # We did not acquire the lock, because someone else already has - # it. Have we timed out in our quest for the lock? - if timeout and timeout_time < time.time(): - os.unlink(self._tmpfname) - log.error('timed out') - raise TimeOutError - # Okay, we haven't timed out, but we didn't get the lock. Let's - # find if the lock lifetime has expired. - if time.time() > self._releasetime() + CLOCK_SLOP: - # Yes, so break the lock. - self._break() - log.error('lifetime has expired, breaking') - # Okay, someone else has the lock, our claim hasn't timed out yet, - # and the expected lock lifetime hasn't expired yet. So let's - # wait a while for the owner of the lock to give it up. - elif not loopcount % 100: - log.debug('waiting for claim: %s', self._lockfile) - self._sleep() - - def unlock(self, unconditionally=False): - """Unlock the lock. - - If we don't already own the lock (either because of unbalanced unlock - calls, or because the lock was stolen out from under us), raise a - NotLockedError, unless optional `unconditionally' is true. - """ - islocked = self.locked() - if not islocked and not unconditionally: - raise NotLockedError - # If we owned the lock, remove the global file, relinquishing it. - if islocked: - try: - os.unlink(self._lockfile) - except OSError, e: - if e.errno <> errno.ENOENT: - raise - # Remove our tempfile - try: - os.unlink(self._tmpfname) - except OSError, e: - if e.errno <> errno.ENOENT: - raise - log.debug('unlocked: %s', self._lockfile) - - def locked(self): - """Return true if we own the lock, false if we do not. - - Checking the status of the lock resets the lock's lifetime, which - helps avoid race conditions during the lock status test. - """ - # Discourage breaking the lock for a while. - try: - self._touch() - except OSError, e: - if e.errno == errno.EPERM: - # We can't touch the file because we're not the owner. I - # don't see how we can own the lock if we're not the owner. - return False - else: - raise - # XXX Can the link count ever be > 2? - if self._linkcount() <> 2: - return False - return self._read() == self._tmpfname - - def finalize(self): - log.debug('finalize: %s', self._lockfile) - self.unlock(unconditionally=True) - - def __del__(self): - log.debug('__del__: %s', self._lockfile) - if self._owned: - self.finalize() - - # Python 2.5 context manager protocol support. - def __enter__(self): - self.lock() - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - self.unlock() - # Don't suppress any exception that might have occurred. - return False - - # Use these only if you're transfering ownership to a child process across - # a fork. Use at your own risk, but it should be race-condition safe. - # _transfer_to() is called in the parent, passing in the pid of the child. - # _take_possession() is called in the child, and blocks until the parent - # has transferred possession to the child. _disown() is used to set the - # _owned flag to false, and it is a disgusting wart necessary to make - # forced lock acquisition work in mailmanctl. :( - def _transfer_to(self, pid): - # First touch it so it won't get broken while we're fiddling about. 
- self._touch() - # Find out current claim's temp filename - winner = self._read() - # Now twiddle ours to the given pid - self._tmpfname = '%s.%s.%d' % ( - self._lockfile, socket.gethostname(), pid) - # Create a hard link from the global lock file to the temp file. This - # actually does things in reverse order of normal operation because we - # know that lockfile exists, and tmpfname better not! - os.link(self._lockfile, self._tmpfname) - # Now update the lock file to contain a reference to the new owner - self._write() - # Toggle off our ownership of the file so we don't try to finalize it - # in our __del__() - self._owned = False - # Unlink the old winner, completing the transfer - os.unlink(winner) - # And do some sanity checks - assert self._linkcount() == 2 - assert self.locked() - log.debug('transferred the lock: %s', self._lockfile) - - def _take_possession(self): - self._tmpfname = tmpfname = '%s.%s.%d' % ( - self._lockfile, socket.gethostname(), os.getpid()) - # Wait until the linkcount is 2, indicating the parent has completed - # the transfer. - while self._linkcount() <> 2 or self._read() <> tmpfname: - time.sleep(0.25) - log.debug('took possession of the lock: %s', self._lockfile) - - def _disown(self): - self._owned = False - - # - # Private interface - # - - def _write(self): - # Make sure it's group writable - fp = open(self._tmpfname, 'w') - try: - fp.write(self._tmpfname) - finally: - fp.close() - - def _read(self): - try: - fp = open(self._lockfile) - try: - filename = fp.read() - finally: - fp.close() - return filename - except EnvironmentError, e: - if e.errno <> errno.ENOENT: - raise - return None - - def _touch(self, filename=None): - expiration_date = datetime.datetime.now() + self._lifetime - t = time.mktime(expiration_date.timetuple()) - try: - # XXX We probably don't need to modify atime, but this is easier. - os.utime(filename or self._tmpfname, (t, t)) - except OSError, e: - if e.errno <> errno.ENOENT: - raise - - def _releasetime(self): - try: - return os.stat(self._lockfile).st_mtime - except OSError, e: - if e.errno <> errno.ENOENT: - raise - return -1 - - def _linkcount(self): - try: - return os.stat(self._lockfile).st_nlink - except OSError, e: - if e.errno <> errno.ENOENT: - raise - return -1 - - def _break(self): - # First, touch the global lock file. This reduces but does not - # eliminate the chance for a race condition during breaking. Two - # processes could both pass the test for lock expiry in lock() before - # one of them gets to touch the global lockfile. This shouldn't be - # too bad because all they'll do in this function is wax the lock - # files, not claim the lock, and we can be defensive for ENOENTs - # here. - # - # Touching the lock could fail if the process breaking the lock and - # the process that claimed the lock have different owners. We could - # solve this by set-uid'ing the CGI and mail wrappers, but I don't - # think it's that big a problem. - try: - self._touch(self._lockfile) - except OSError, e: - if e.errno <> errno.EPERM: - raise - # Get the name of the old winner's temp file. - winner = self._read() - # Remove the global lockfile, which actually breaks the lock. - try: - os.unlink(self._lockfile) - except OSError, e: - if e.errno <> errno.ENOENT: - raise - # Try to remove the old winner's temp file, since we're assuming the - # winner process has hung or died. 
Don't worry too much if we can't - # unlink their temp file -- this doesn't wreck the locking algorithm, - # but will leave temp file turds laying around, a minor inconvenience. - try: - if winner: - os.unlink(winner) - except OSError, e: - if e.errno <> errno.ENOENT: - raise - - def _sleep(self): - interval = random.random() * 2.0 + 0.01 - time.sleep(interval) - - - -# Unit test framework -def _dochild(): - prefix = '[%d]' % os.getpid() - # Create somewhere between 1 and 1000 locks - lockfile = LockFile('/tmp/LockTest', lifetime=120) - # Use a lock lifetime of between 1 and 15 seconds. Under normal - # situations, Mailman's usage patterns (untested) shouldn't be much longer - # than this. - workinterval = 5 * random.random() - hitwait = 20 * random.random() - print prefix, 'workinterval:', workinterval - islocked = False - t0 = 0 - t1 = 0 - t2 = 0 - try: - try: - t0 = time.time() - print prefix, 'acquiring...' - lockfile.lock() - print prefix, 'acquired...' - islocked = True - except TimeOutError: - print prefix, 'timed out' - else: - t1 = time.time() - print prefix, 'acquisition time:', t1-t0, 'seconds' - time.sleep(workinterval) - finally: - if islocked: - try: - lockfile.unlock() - t2 = time.time() - print prefix, 'lock hold time:', t2-t1, 'seconds' - except NotLockedError: - print prefix, 'lock was broken' - # wait for next web hit - print prefix, 'webhit sleep:', hitwait - time.sleep(hitwait) - - -def _seed(): - try: - fp = open('/dev/random') - d = fp.read(40) - fp.close() - except EnvironmentError, e: - if e.errno <> errno.ENOENT: - raise - import sha - d = sha.new(`os.getpid()`+`time.time()`).hexdigest() - random.seed(d) - - -def _onetest(): - loopcount = random.randint(1, 100) - for i in range(loopcount): - print 'Loop %d of %d' % (i+1, loopcount) - pid = os.fork() - if pid: - # parent, wait for child to exit - pid, status = os.waitpid(pid, 0) - else: - # child - _seed() - try: - _dochild() - except KeyboardInterrupt: - pass - os._exit(0) - - -def _reap(kids): - if not kids: - return - pid, status = os.waitpid(-1, os.WNOHANG) - if pid <> 0: - del kids[pid] - - -def _test(numtests): - kids = {} - for i in range(numtests): - pid = os.fork() - if pid: - # parent - kids[pid] = pid - else: - # child - _seed() - try: - _onetest() - except KeyboardInterrupt: - pass - os._exit(0) - # slightly randomize each kid's seed - while kids: - _reap(kids) - - -if __name__ == '__main__': - import sys - import random - _test(int(sys.argv[1])) diff --git a/Mailman/queue/archive.py b/Mailman/queue/archive.py index b0274d49c..518a0a44c 100644 --- a/Mailman/queue/archive.py +++ b/Mailman/queue/archive.py @@ -20,10 +20,11 @@ from __future__ import with_statement import time + from email.Utils import parsedate_tz, mktime_tz, formatdate +from locknix.lockfile import Lock from Mailman.configuration import config -from Mailman.lockfile import LockFile from Mailman.queue import Runner @@ -67,5 +68,5 @@ class ArchiveRunner(Runner): # Always put an indication of when we received the message. msg['X-List-Received-Date'] = receivedtime # While a list archiving lock is acquired, archive the message. 
- with LockFile(os.path.join(mlist.full_path, 'archive.lck')): + with Lock(os.path.join(mlist.full_path, 'archive.lck')): mlist.ArchiveMail(msg) diff --git a/Mailman/tests/test_documentation.py b/Mailman/tests/test_documentation.py index 825a95a3c..d8578bd05 100644 --- a/Mailman/tests/test_documentation.py +++ b/Mailman/tests/test_documentation.py @@ -18,6 +18,7 @@ """Harness for testing Mailman's documentation.""" import os +import pdb import doctest import unittest @@ -33,48 +34,22 @@ COMMASPACE = ', ' def cleaning_teardown(testobj): - usermgr = config.db.user_manager - listmgr = config.db.list_manager - # Remove all users, addresses and members, then delete all mailing lists. - for user in usermgr.users: - usermgr.delete_user(user) - for address in usermgr.addresses: - usermgr.delete_address(address) - for mlist in listmgr.mailing_lists: - for member in mlist.members.members: - member.unsubscribe() - for admin in mlist.administrators.members: - admin.unsubscribe() - requestdb = config.db.requests.get_list_requests(mlist) - for request in requestdb.held_requests: - requestdb.delete_request(request.id) - listmgr.delete(mlist) + """Clear all persistent data at the end of a doctest.""" + # Clear the database of all rows. + config.db._reset() flush() - assert not list(listmgr.mailing_lists), ( - 'There should be no mailing lists left: %s' % - COMMASPACE.join(sorted(listmgr.names))) - assert not list(usermgr.users), ( - 'There should be no users left!') - assert not list(usermgr.addresses), ( - 'There should be no addresses left!') - # Remove all queue files. - for dirpath, dirnames, filenames in os.walk(config.QUEUE_DIR): - for filename in filenames: - os.remove(os.path.join(dirpath, filename)) # Remove all but the default style. for style in style_manager.styles: if style.name <> 'default': style_manager.unregister(style) - # Clear the message store. - global_ids = [] - for msg in config.db.message_store.messages: - global_ids.append('%s/%s' % ( - msg['X-List-ID-Hash'], msg['X-List-Sequence-Number'])) - for global_id in global_ids: - config.db.message_store.delete_message(global_id) - flush() - assert not list(config.db.message_store.messages), ( - 'There should be no messages left in the message store.') + # Remove all queue files. + for dirpath, dirnames, filenames in os.walk(config.QUEUE_DIR): + for filename in filenames: + os.remove(os.path.join(dirpath, filename)) + # Clear out messages in the message store directory. + for dirpath, dirnames, filenames in os.walk(config.MESSAGES_DIR): + for filename in filenames: + os.remove(os.path.join(dirpath, filename)) diff --git a/Mailman/tests/test_lockfile.py b/Mailman/tests/test_lockfile.py deleted file mode 100644 index 9d6420e74..000000000 --- a/Mailman/tests/test_lockfile.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright (C) 2002-2007 by the Free Software Foundation, Inc. -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, -# USA. - -"""Unit tests for the LockFile class.""" - -import os -import shutil -import tempfile -import unittest - -from Mailman.lockfile import LockFile - -LOCKFILE_NAME = '.mm-test-lock' - - - -class TestLockFile(unittest.TestCase): - def setUp(self): - self._tmpdir = tempfile.mkdtemp(prefix='mmtest') - self._lockf = os.path.join(self._tmpdir, LOCKFILE_NAME) - - def tearDown(self): - shutil.rmtree(self._tmpdir) - - # XXX There really should be additional multi-thread or -proc tests, a la - # the __main__ of LockFile.py - - def test_two_lockfiles_same_proc(self): - lf1 = LockFile(LOCKFILE_NAME) - lf2 = LockFile(LOCKFILE_NAME) - lf1.lock() - self.failIf(lf2.locked()) - - - -def test_suite(): - suite = unittest.TestSuite() - suite.addTest(unittest.makeSuite(TestLockFile)) - return suite @@ -91,7 +91,8 @@ Any other spelling is incorrect.""", # Third-party requirements. install_requires = [ 'Elixir', - 'SQLAlchemy>=0.3.10', + 'SQLAlchemy', + 'locknix', 'munepy', 'wsgiref', 'zope.interface', |
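Taken together, the hunks in this commit make a single substitution: import the lock class from locknix instead of Mailman.lockfile, keep the same lock-file path, and use it exactly as before, either as a context manager where the code already did (Scrubber.py, queue/archive.py) or via explicit lock()/unlock() calls where the lock object is kept around (HyperDatabase.DumbBTree). A condensed sketch of the new pattern, assuming only that locknix is installed (it is added to install_requires just above) and using a throwaway temp path rather than a real list's archive lock:

    from __future__ import with_statement

    import os
    import tempfile

    from locknix.lockfile import Lock

    # Any path on the target filesystem works; this one is only for the example.
    lock_path = os.path.join(tempfile.gettempdir(), 'example-archive.lck')

    # Old: from Mailman.lockfile import LockFile; with LockFile(lock_path): ...
    # New: same shape, different class.
    with Lock(lock_path):
        pass  # e.g. mlist.ArchiveMail(msg) or save_attachment(...)

    # Where a context manager does not fit, the lock is held explicitly, as
    # DumbBTree does above:
    #     self.lockfile = lockfile.Lock(self.path + '.lock')
    #     self.lockfile.lock()
    #     ...
    #     self.lockfile.unlock()   # lockfile.NotLockedError if not held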

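The docstring of the removed Mailman/lockfile.py above describes the NFS-safe scheme that locknix now supplies: write a unique claim file on the same filesystem, link(2) it to the lock file, and treat a link count of exactly 2 as a successful acquisition. A minimal sketch of just that acquire step follows; the helper name and claim-file naming are invented for illustration, and this is neither the removed class nor locknix's implementation:

    import os
    import errno
    import socket

    def try_acquire(lockfile):
        """Return True if lockfile was acquired via the link-count trick."""
        # A claim file unique to this host and process, on the same
        # filesystem as the lock file itself.
        claim = '%s.%s.%d' % (lockfile, socket.gethostname(), os.getpid())
        fp = open(claim, 'w')
        try:
            fp.write(claim)
        finally:
            fp.close()
        try:
            # link(2) is atomic even on NFS, where O_EXCL alone is unreliable.
            os.link(claim, lockfile)
        except OSError, e:
            if e.errno != errno.EEXIST:
                raise
        # Whether or not link() reported EEXIST, a link count of exactly 2
        # (the claim file plus the lock file) means this process now holds
        # the lock; anything else means another process got there first.
        return os.stat(claim).st_nlink == 2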
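Finally, the IPended/IPendings interfaces near the top of this diff are ordinary zope.interface contracts (zope.interface already appears in install_requires). A hypothetical, non-persistent implementation might declare conformance as shown below; the import path, the uuid token scheme, and the three-day default lifetime are all invented for illustration and are not Mailman's actual pending database:

    import datetime
    import uuid

    from zope.interface import implements

    # Import path assumed; use wherever IPendings is actually defined.
    from Mailman.interfaces import IPendings

    class MemoryPendings(object):
        """A toy in-memory pendings store, for illustration only."""
        implements(IPendings)

        def __init__(self):
            self._data = {}

        def add(self, pendable, lifetime=None):
            # Hand back an opaque token tied to the pended data; the
            # expiration default here is arbitrary.
            if lifetime is None:
                lifetime = datetime.timedelta(days=3)
            token = uuid.uuid4().hex
            expiration_date = datetime.datetime.now() + lifetime
            self._data[token] = (pendable, expiration_date)
            return token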