1 # Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
2 # Use of this source code is governed by a BSD-style license that can be
3 # found in the LICENSE file.
5 """A library for managing file locks."""
7 from __future__ import print_function
14 LOCK_DIR = '/tmp/run_once'
class LockNotAcquired(Exception):
  """Raised when the run_once lock is already held by another lock object.

  Note that this can happen even within the same process. A second lock
  object targeting the same lock file will fail to acquire the lock regardless
  of which process or thread currently holds it.

  Attributes:
    lock_file_path: Path to the lock file involved.
    owner_pid: Pid of the process that currently has the lock (as read from
        the lock file, so it is a string and purely informational).
  """

  def __init__(self, lock_file_path, owner_pid, *args, **kwargs):
    Exception.__init__(self, *args, **kwargs)
    self.lock_file_path = lock_file_path
    self.owner_pid = owner_pid

  def __str__(self):
    return "Lock (%s) held by pid %s" % (self.lock_file_path, self.owner_pid)
"""This class grabs an exclusive flock on a file in a specified directory.

This class can be used in combination with the "with" statement.

Because the lock is associated with a file descriptor, the lock will
continue to be held for as long as the file descriptor is open (even
in subprocesses, exec'd executables, etc).

For informational purposes only, the pid of the current process is
written into the lock file when it is held, but it's never removed.
"""
def __init__(self, lock_name, lock_dir=None, blocking=False, shared=False):
  """Set up our lock class, but don't do any work yet (or grab the lock).

  Args:
    lock_name: The file name of the lock file. If it's a relative name,
        it will be expanded based on lock_dir.
    lock_dir: The directory in which to create lock files; it defaults
        to LOCK_DIR.
    blocking: When trying to acquire a lock, do we block until it's
        available, or raise "LockNotAcquired"? If we block, we may wait
        indefinitely.
    shared: A value of False means get an exclusive lock, and True
        means to get a shared lock.
  """
  if not lock_dir:
    lock_dir = LOCK_DIR

  # os.path.join will ignore lock_dir, if lock_name is an absolute path.
  self.lock_file = os.path.join(lock_dir, lock_name)

  self._blocking = blocking
  self._shared = shared
  # Open file object for the lock file while the lock is held; None before
  # Lock() and after Unlock(). IsLocked() keys off this.
  self._fd = None
def Lock(self):
  """Take the flock (shared or exclusive, per the constructor arguments).

  It's safe to call this multiple times, though the first Unlock will
  release the lock.

  Raises:
    LockNotAcquired: If non-blocking and another descriptor already holds
        a conflicting lock on the file.
  """
  # Create the directory for our lock files if it doesn't already exist.
  try:
    os.makedirs(os.path.dirname(self.lock_file))
  except OSError as e:
    # BUG FIX: this previously used 'is not errno.EEXIST', which compares
    # int identity rather than equality and only works by accident for
    # small cached ints. EEXIST simply means someone else created the
    # directory first; anything else is a real error.
    if e.errno != errno.EEXIST:
      raise

  # Append mode creates the file if needed without truncating a pid that
  # a current holder may have written (the pid is never removed).
  self._fd = open(self.lock_file, 'a')

  flags = fcntl.LOCK_SH if self._shared else fcntl.LOCK_EX
  if not self._blocking:
    flags |= fcntl.LOCK_NB

  try:
    fcntl.flock(self._fd, flags)

    # We have the lock, write our pid into it for informational purposes.
    # Flush so other processes hitting LockNotAcquired can read it.
    self._fd.write(str(os.getpid()) + '\n')
    self._fd.flush()
  except IOError as e:
    # Drop the useless descriptor so IsLocked() stays accurate.
    self._fd.close()
    self._fd = None

    # We got the error that someone else already holds the lock.
    # Can only happen if we are blocking == False.
    if e.errno == errno.EAGAIN:
      # To be helpful, grab pid of owner process from file.
      with open(self.lock_file, 'r') as f:
        owner_pid = f.read().strip()

      raise LockNotAcquired(self.lock_file, owner_pid)

    # Pass along any other error for debugging.
    raise
def Unlock(self):
  """Release the flock.

  Safe to call even when the lock is not held (it's then a no-op). Closing
  the descriptor is what actually releases the flock; the explicit LOCK_UN
  releases it even if the fd has been duplicated (e.g. into a child).
  """
  if self._fd:
    fcntl.flock(self._fd, fcntl.LOCK_UN)
    self._fd.close()
    self._fd = None
def IsLocked(self):
  """Return True if lock is currently acquired.

  The lock is considered held exactly while a lock-file descriptor is open
  (i.e. after a successful Lock() and before the matching Unlock()).
  """
  return bool(self._fd)
# Lock objects can be used with "with" statements.
def __enter__(self):
  # Acquire on entry; return the lock object so callers can inspect it.
  self.Lock()
  return self

def __exit__(self, _type, _value, _traceback):
  # Release on exit regardless of whether the body raised.
  self.Unlock()
def ExecWithLock(cmd, lock_name=None, lock_dir=None, blocking=False):
  """Helper method that execs another program with an flock.

  On success this call does NOT return: the current process image is
  replaced by cmd via os.execvp.

  Args:
    cmd: The command to run through flock, as an argv-style list.
    lock_name: defaults to the name of the command.
    lock_dir: defaults to LOCK_DIR.
    blocking: Whether to take a blocking lock.

  Raises:
    LockNotAcquired: If the lock wasn't acquired.
  """
  if not lock_name:
    lock_name = os.path.basename(cmd[0])

  with Lock(lock_name, lock_dir=lock_dir, blocking=blocking):
    # Our lock file is locked, exec our subprocess. It has an extra fd
    # in its environment, and the lock on that fd will be held until
    # that fd is closed on exit.
    os.execvp(cmd[0], cmd)

  # Note that the above new process will not return here, which has
  # the effect of never exiting this 'with' context, which means
  # Lock.Unlock() is never called. The lock is released all the same
  # when the exec'd program exits and its copy of the fd closes.