# Copyright (C) 2001-2008 by the Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
# USA.
from __future__ import with_statement
import os
import sys
import errno
import signal
import socket
import logging
import optparse
from datetime import timedelta
from locknix import lockfile
from munepy import Enum
from Mailman import Defaults
from Mailman import Version
from Mailman import loginit
from Mailman.configuration import config
from Mailman.i18n import _
from Mailman.initialize import initialize
DOT = '.'
LOCK_LIFETIME = Defaults.days(1) + Defaults.hours(6)
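# Lock lifetime rationale: control_loop() below refreshes the master lock
# once per day via a SIGALRM handler, so the extra six hours here is the
# safety margin before the lock could ever go stale.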
log = None
parser = None
def parseargs():
parser = optparse.OptionParser(version=Version.MAILMAN_VERSION,
usage=_("""\
Master queue runner watcher.
Start and watch the configured queue runners and ensure that they stay alive
and kicking. Each is forked and exec'd in turn, with the master waiting on
their process ids. When it detects that a child queue runner has exited, it
may restart it.
The queue runners respond to SIGINT, SIGTERM, SIGUSR1 and SIGHUP. SIGINT,
SIGTERM and SIGUSR1 all cause the qrunners to exit cleanly. The master will
restart qrunners that have exited due to a SIGUSR1 or some other kind of exit
condition (say because of an exception). SIGHUP causes the master and the
qrunners to close their log files, and reopen them upon the next printed
message.
The master also responds to SIGINT, SIGTERM, SIGUSR1 and SIGHUP, which it
simply passes on to the qrunners. Note that the master will close and reopen
its own log files on receipt of a SIGHUP. The master also leaves its own
process id in the file `data/master-qrunner.pid` but you normally don't need
to use this pid directly.
Usage: %prog [options]"""))
parser.add_option('-n', '--no-restart',
dest='restartable', default=True, action='store_false',
help=_("""\
Don't restart the qrunners when they exit because of an error or a SIGUSR1.
Use this only for debugging."""))
parser.add_option('-f', '--force',
default=False, action='store_true',
help=_("""\
If the master watcher finds an existing master lock, it will normally exit
with an error message. With this option, the master will perform an extra
level of checking. If a process matching the host/pid described in the lock
file is running, the master will still exit, requiring you to manually clean
up the lock. But if no matching process is found, the master will remove the
apparently stale lock and make another attempt to claim the master lock."""))
parser.add_option('-C', '--config',
help=_('Alternative configuration file to use'))
options, arguments = parser.parse_args()
if len(arguments) > 0:
parser.error(_('Too many arguments'))
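    # Stash the parsed results on the parser object itself; main() publishes
    # the parser as a module global, so code such as acquire_lock() and
    # start_runner() can reach the options without threading them through
    # every call.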
parser.options = options
parser.arguments = arguments
return parser
def get_lock_data():
"""Get information from the master lock file.
:return: A 3-tuple of the hostname, integer process id, and file name of
the lock file.
"""
with open(config.LOCK_FILE) as fp:
filename = os.path.split(fp.read().strip())[1]
parts = filename.split('.')
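    # Hypothetical example, assuming locknix's <name>.<hostname>.<pid>.<suffix>
    # claim file layout: 'master-qrunner.mail.example.com.1234.0' would parse
    # to hostname 'mail.example.com' and pid 1234.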
hostname = DOT.join(parts[1:-2])
    pid = int(parts[-2])
    return hostname, pid, filename
class WatcherState(Enum):
# Another master watcher is running.
conflict = 1
# No conflicting process exists.
stale_lock = 2
# Hostname from lock file doesn't match.
host_mismatch = 3
def master_state():
"""Get the state of the master watcher.
:return: WatcherState describing the state of the lock file.
"""
    # Possible outcomes: a process matching the lock's pid exists on this
    # host (conflict); the host matches but no such process exists
    # (stale_lock); or the lock was acquired on a different host
    # (host_mismatch).
hostname, pid, tempfile = get_lock_data()
    if hostname != socket.gethostname():
return WatcherState.host_mismatch
# Find out if the process exists by calling kill with a signal 0.
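    # Note that signal 0 performs only error checking. If the pid exists but
    # belongs to a user we cannot signal, os.kill() raises EPERM, which is
    # not ESRCH and therefore falls through to the re-raise below.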
try:
os.kill(pid, 0)
return WatcherState.conflict
except OSError, e:
if e.errno == errno.ESRCH:
# No matching process id.
return WatcherState.stale_lock
# Some other error occurred.
raise
def acquire_lock_1(force):
"""Try to acquire the master queue runner lock.
:param force: Flag that controls whether to force acquisition of the lock.
:return: The master queue runner lock.
:raises: `TimeOutError` if the lock could not be acquired.
"""
lock = lockfile.Lock(config.LOCK_FILE, LOCK_LIFETIME)
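    # Use a very short timeout so that acquisition is effectively a
    # non-blocking attempt; if another master already holds the lock,
    # TimeOutError is raised almost immediately.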
try:
lock.lock(timedelta(seconds=0.1))
return lock
except lockfile.TimeOutError:
if not force:
raise
# Force removal of lock first.
lock.disown()
hostname, pid, tempfile = get_lock_data()
os.unlink(config.LOCK_FILE)
os.unlink(os.path.join(config.LOCK_DIR, tempfile))
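        # Retry exactly once, with force=False; if the retry also times out,
        # the TimeOutError propagates to our caller.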
return acquire_lock_1(force=False)
def acquire_lock():
"""Acquire the master queue runner lock.
:return: The master queue runner lock or None if the lock couldn't be
    acquired. In that case, an error message is also printed to standard
error.
"""
try:
lock = acquire_lock_1(parser.options.force)
return lock
except lockfile.TimeOutError:
status = master_state()
if status == WatcherState.conflict:
# Hostname matches and process exists.
message = _("""\
The master qrunner lock could not be acquired because it appears
as though another master qrunner is already running.
""")
elif status == WatcherState.stale_lock:
# Hostname matches but the process does not exist.
message = _("""\
The master qrunner lock could not be acquired. It appears as though there is
a stale master qrunner lock. Try re-running this program with the --force flag.
""")
else:
assert status == WatcherState.host_mismatch, (
'Invalid enum value: %s' % status)
# Hostname doesn't even match.
hostname, pid, tempfile = get_lock_data()
message = _("""\
The master qrunner lock could not be acquired, because it appears as if some
process on some other host may have acquired it. We can't test for stale
locks across host boundaries, so you'll have to clean this up manually.
Lock file: $config.LOCK_FILE
Lock host: $hostname
Exiting.""")
parser.error(message)
def start_runner(qrname, slice, count):
"""Start a queue runner.
All arguments are passed to the qrunner process.
:param qrname: The name of the queue runner.
:param slice: The slice number.
:param count: The total number of slices.
:return: The process id of the child queue runner.
"""
pid = os.fork()
if pid:
# Parent.
return pid
# Child.
#
# Craft the command line arguments for the exec() call.
rswitch = '--runner=%s:%d:%d' % (qrname, slice, count)
# Wherever mailmanctl lives, so too must live the qrunner script.
exe = os.path.join(config.BIN_DIR, 'qrunner')
    # sys.executable, the absolute path to the Python interpreter, must be
    # given as argv[0] due to Python's library search algorithm.
args = [sys.executable, sys.executable, exe, rswitch, '-s']
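    # With hypothetical paths, the child's argv ends up looking like
    # ['/usr/bin/python', '/opt/mailman/bin/qrunner', '--runner=bounce:0:1', '-s'],
    # i.e. argv[0] is the interpreter and argv[1] is the qrunner script.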
if parser.options.config:
args.extend(['-C', parser.options.config])
log.debug('starting: %s', args)
os.execl(*args)
# We should never get here.
raise RuntimeError('os.execl() failed')
def control_loop(lock):
"""The main control loop.
This starts up the queue runners, watching for their exit and restarting
them if need be.
"""
restartable = parser.options.restartable
# Start all the qrunners. Keep a dictionary mapping process ids to
# information about the child processes.
kids = {}
# Set up our signal handlers. Also set up a SIGALRM handler to refresh
# the lock once per day. The lock lifetime is 1 day + 6 hours so this
# should be plenty.
def sigalrm_handler(signum, frame):
lock.refresh()
signal.alarm(int(Defaults.days(1)))
signal.signal(signal.SIGALRM, sigalrm_handler)
signal.alarm(int(Defaults.days(1)))
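    # signal.alarm() takes a whole number of seconds; this assumes that
    # Defaults.days(1) converts cleanly to 86400 via int().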
# SIGHUP tells the qrunners to close and reopen their log files.
def sighup_handler(signum, frame):
loginit.reopen()
for pid in kids:
os.kill(pid, signal.SIGHUP)
log.info('Master watcher caught SIGHUP. Re-opening log files.')
signal.signal(signal.SIGHUP, sighup_handler)
# SIGUSR1 is used by 'mailman restart'.
def sigusr1_handler(signum, frame):
for pid in kids:
os.kill(pid, signal.SIGUSR1)
        log.info('Master watcher caught SIGUSR1. Restarting the qrunners.')
signal.signal(signal.SIGUSR1, sigusr1_handler)
# SIGTERM is what init will kill this process with when changing run
# levels. It's also the signal 'mailmanctl stop' uses.
def sigterm_handler(signum, frame):
for pid in kids:
os.kill(pid, signal.SIGTERM)
log.info('Master watcher caught SIGTERM. Exiting.')
signal.signal(signal.SIGTERM, sigterm_handler)
# SIGINT is what control-C gives.
def sigint_handler(signum, frame):
for pid in kids:
os.kill(pid, signal.SIGINT)
        log.info('Master watcher caught SIGINT. Exiting.')
signal.signal(signal.SIGINT, sigint_handler)
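    # To summarize the signal protocol: SIGHUP reopens the log files
    # everywhere; SIGUSR1 stops the qrunners but the master restarts them;
    # SIGINT and SIGTERM stop the qrunners for good, after which the wait
    # loop below sees ECHILD and the master itself exits.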
# Start all the child qrunners.
for qrname, count in config.qrunners.items():
for slice_number in range(count):
# queue runner name, slice number, number of slices, restart count
info = (qrname, slice_number, count, 0)
pid = start_runner(qrname, slice_number, count)
kids[pid] = info
# Enter the main wait loop.
try:
while True:
try:
pid, status = os.wait()
except OSError, error:
# No children? We're done.
if error.errno == errno.ECHILD:
break
# If the system call got interrupted, just restart it.
elif error.errno == errno.EINTR:
continue
else:
raise
# Find out why the subprocess exited by getting the signal
# received or exit status.
if os.WIFSIGNALED(status):
why = os.WTERMSIG(status)
elif os.WIFEXITED(status):
why = os.WEXITSTATUS(status)
else:
why = None
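            # For example, a child killed by SIGTERM yields why ==
            # signal.SIGTERM, while one that called sys.exit(1) yields
            # why == 1; why is None only in the unlikely case that the
            # status is neither a signal nor a normal exit.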
            # We'll restart the subprocess only if it exited due to a
            # SIGUSR1 and the --no-restart command line switch was not
            # given. Counting restarts lets us better handle runaway
            # restarts (e.g. if the subprocess has a syntax error!).
qrname, slice, count, restarts = kids.pop(pid)
restart = False
if why == signal.SIGUSR1 and restartable:
restart = True
# Have we hit the maximum number of restarts?
restarts += 1
if restarts > config.MAX_RESTARTS:
restart = False
            # Log the subprocess's exit, noting whether it will be restarted.
log.debug("""\
Master detected subprocess exit
(pid: %d, why: %s, class: %s, slice: %d/%d) %s""",
pid, why, qrname, slice+1, count,
('[restarting]' if restart else ''))
# See if we've reached the maximum number of allowable restarts
if restarts > config.MAX_RESTARTS:
log.info("""\
qrunner %s reached maximum restart limit of %d, not restarting.""",
qrname, config.MAX_RESTARTS)
            # Now restart the subprocess if it qualified for restart above.
if restart:
newpid = start_runner(qrname, slice, count)
kids[newpid] = (qrname, slice, count, restarts)
finally:
# Should we leave the main loop for any reason, we want to be sure
# all of our children are exited cleanly. Send SIGTERMs to all
# the child processes and wait for them all to exit.
for pid in kids:
try:
os.kill(pid, signal.SIGTERM)
except OSError, error:
if error.errno == errno.ESRCH:
# The child has already exited.
log.info('ESRCH on pid: %d', pid)
# Wait for all the children to go away.
while kids:
try:
pid, status = os.wait()
del kids[pid]
except OSError, e:
if e.errno == errno.ECHILD:
break
elif e.errno == errno.EINTR:
continue
raise
def main():
"""Main process."""
global log, parser
parser = parseargs()
initialize(parser.options.config)
# We can't grab the logger until after everything's been initialized.
log = logging.getLogger('mailman.qrunner')
# Acquire the master lock, exiting if we can't acquire it. We'll let the
# caller handle any clean up or lock breaking. No with statement here
# because Lock's constructor doesn't support a timeout.
lock = acquire_lock()
try:
with open(config.PIDFILE, 'w') as fp:
print >> fp, os.getpid()
try:
control_loop(lock)
finally:
os.remove(config.PIDFILE)
finally:
lock.unlock()
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
pass