#define AFSCBMAX 50 /* maximum callbacks transferred per bulk op */
+struct afs_uuid {
+ __be32 time_low; /* low part of timestamp */
+ __be16 time_mid; /* mid part of timestamp */
+ __be16 time_hi_and_version; /* high part of timestamp and version */
+ __u8 clock_seq_hi_and_reserved; /* clock seq hi and variant */
+ __u8 clock_seq_low; /* clock seq low */
+ __u8 node[6]; /* spatially unique node ID (MAC addr) */
+};
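/*
 * [Editorial note] struct afs_uuid matches the 16-byte RFC 4122 wire
 * layout exactly (4 + 2 + 2 + 1 + 1 + 6 bytes, no padding), which is
 * why afs_net_init() in main.c below can fill it in directly with:
 *
 *     generate_random_uuid((unsigned char *)&net->uuid);
 *
 * A BUILD_BUG_ON(sizeof(struct afs_uuid) != 16) would document the
 * assumption, but this patch does not add one.
 */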
+
/*
* AFS volume information
*/
CIRC_SPACE((server)->cb_break_head, (server)->cb_break_tail, \
ARRAY_SIZE((server)->cb_break))
-//static void afs_callback_updater(struct work_struct *);
-
-static struct workqueue_struct *afs_callback_update_worker;
+struct workqueue_struct *afs_callback_update_worker;
/*
* allow the fileserver to request callback state (re-)initialisation
* had callbacks entirely, and the server will call us later to break
* them
*/
- afs_fs_give_up_callbacks(server, true);
+ afs_fs_give_up_callbacks(server->cell->net, server, true);
}
/*
afs_put_vnode(vl);
}
#endif
-
-/*
- * initialise the callback update process
- */
-int __init afs_callback_update_init(void)
-{
- afs_callback_update_worker = alloc_ordered_workqueue("kafs_callbackd",
- WQ_MEM_RECLAIM);
- return afs_callback_update_worker ? 0 : -ENOMEM;
-}
-
-/*
- * shut down the callback update process
- */
-void afs_callback_update_kill(void)
-{
- destroy_workqueue(afs_callback_update_worker);
-}
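/*
 * [Editorial note] The kafs_callbackd ordered workqueue itself
 * survives: afs_callback_update_worker loses its static qualifier
 * above, and its allocation and destruction move into afs_init() and
 * afs_exit() in main.c below, next to the other module-lifetime
 * workqueues.
 */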
#include <keys/rxrpc-type.h>
#include "internal.h"
-DECLARE_RWSEM(afs_proc_cells_sem);
-LIST_HEAD(afs_proc_cells);
-
-static LIST_HEAD(afs_cells);
-static DEFINE_RWLOCK(afs_cells_lock);
-static DECLARE_RWSEM(afs_cells_sem); /* add/remove serialisation */
-static DECLARE_WAIT_QUEUE_HEAD(afs_cells_freeable_wq);
-static struct afs_cell *afs_cell_root;
-
/*
* allocate a cell record and fill in its name, VL server address list and
* allocate an anonymous key
*/
-static struct afs_cell *afs_cell_alloc(const char *name, unsigned namelen,
+static struct afs_cell *afs_cell_alloc(struct afs_net *net,
+ const char *name, unsigned namelen,
char *vllist)
{
struct afs_cell *cell;
atomic_set(&cell->usage, 1);
INIT_LIST_HEAD(&cell->link);
+ cell->net = net;
rwlock_init(&cell->servers_lock);
INIT_LIST_HEAD(&cell->servers);
init_rwsem(&cell->vl_sem);
/*
 * afs_cell_create() - create a cell record
+ * @net: The network namespace
* @name: is the name of the cell.
 * @namesz: is the strlen of the cell name.
* @vllist: is a colon separated list of IP addresses in "a.b.c.d" format.
* @retref: is T to return the cell reference when the cell exists.
*/
-struct afs_cell *afs_cell_create(const char *name, unsigned namesz,
+struct afs_cell *afs_cell_create(struct afs_net *net,
+ const char *name, unsigned namesz,
char *vllist, bool retref)
{
struct afs_cell *cell;
_enter("%*.*s,%s", namesz, namesz, name ?: "", vllist);
- down_write(&afs_cells_sem);
- read_lock(&afs_cells_lock);
- list_for_each_entry(cell, &afs_cells, link) {
+ down_write(&net->cells_sem);
+ read_lock(&net->cells_lock);
+ list_for_each_entry(cell, &net->cells, link) {
if (strncasecmp(cell->name, name, namesz) == 0)
goto duplicate_name;
}
- read_unlock(&afs_cells_lock);
+ read_unlock(&net->cells_lock);
- cell = afs_cell_alloc(name, namesz, vllist);
+ cell = afs_cell_alloc(net, name, namesz, vllist);
if (IS_ERR(cell)) {
_leave(" = %ld", PTR_ERR(cell));
- up_write(&afs_cells_sem);
+ up_write(&net->cells_sem);
return cell;
}
/* add a proc directory for this cell */
- ret = afs_proc_cell_setup(cell);
+ ret = afs_proc_cell_setup(net, cell);
if (ret < 0)
goto error;
#endif
/* add to the cell lists */
- write_lock(&afs_cells_lock);
- list_add_tail(&cell->link, &afs_cells);
- write_unlock(&afs_cells_lock);
+ write_lock(&net->cells_lock);
+ list_add_tail(&cell->link, &net->cells);
+ write_unlock(&net->cells_lock);
- down_write(&afs_proc_cells_sem);
- list_add_tail(&cell->proc_link, &afs_proc_cells);
- up_write(&afs_proc_cells_sem);
- up_write(&afs_cells_sem);
+ down_write(&net->proc_cells_sem);
+ list_add_tail(&cell->proc_link, &net->proc_cells);
+ up_write(&net->proc_cells_sem);
+ up_write(&net->cells_sem);
_leave(" = %p", cell);
return cell;
error:
- up_write(&afs_cells_sem);
+ up_write(&net->cells_sem);
key_put(cell->anonymous_key);
kfree(cell);
_leave(" = %d", ret);
if (retref && !IS_ERR(cell))
afs_get_cell(cell);
- read_unlock(&afs_cells_lock);
- up_write(&afs_cells_sem);
+ read_unlock(&net->cells_lock);
+ up_write(&net->cells_sem);
if (retref) {
_leave(" = %p", cell);
* - can be called with a module parameter string
* - can be called from a write to /proc/fs/afs/rootcell
*/
-int afs_cell_init(char *rootcell)
+int afs_cell_init(struct afs_net *net, char *rootcell)
{
struct afs_cell *old_root, *new_root;
char *cp;
*cp++ = 0;
/* allocate a cell record for the root cell */
- new_root = afs_cell_create(rootcell, strlen(rootcell), cp, false);
+ new_root = afs_cell_create(net, rootcell, strlen(rootcell), cp, false);
if (IS_ERR(new_root)) {
_leave(" = %ld", PTR_ERR(new_root));
return PTR_ERR(new_root);
}
/* install the new cell */
- write_lock(&afs_cells_lock);
- old_root = afs_cell_root;
- afs_cell_root = new_root;
- write_unlock(&afs_cells_lock);
+ write_lock(&net->cells_lock);
+ old_root = net->ws_cell;
+ net->ws_cell = new_root;
+ write_unlock(&net->cells_lock);
afs_put_cell(old_root);
_leave(" = 0");
/*
* lookup a cell record
*/
-struct afs_cell *afs_cell_lookup(const char *name, unsigned namesz,
+struct afs_cell *afs_cell_lookup(struct afs_net *net,
+ const char *name, unsigned namesz,
bool dns_cell)
{
struct afs_cell *cell;
_enter("\"%*.*s\",", namesz, namesz, name ?: "");
- down_read(&afs_cells_sem);
- read_lock(&afs_cells_lock);
+ down_read(&net->cells_sem);
+ read_lock(&net->cells_lock);
if (name) {
/* if the cell was named, look for it in the cell record list */
- list_for_each_entry(cell, &afs_cells, link) {
+ list_for_each_entry(cell, &net->cells, link) {
if (strncmp(cell->name, name, namesz) == 0) {
afs_get_cell(cell);
goto found;
found:
;
} else {
- cell = afs_cell_root;
+ cell = net->ws_cell;
if (!cell) {
/* this should not happen unless user tries to mount
* when root cell is not set. Return an impossibly
}
- read_unlock(&afs_cells_lock);
- up_read(&afs_cells_sem);
+ read_unlock(&net->cells_lock);
+ up_read(&net->cells_sem);
_leave(" = %p", cell);
return cell;
create_cell:
- read_unlock(&afs_cells_lock);
- up_read(&afs_cells_sem);
+ read_unlock(&net->cells_lock);
+ up_read(&net->cells_sem);
- cell = afs_cell_create(name, namesz, NULL, true);
+ cell = afs_cell_create(net, name, namesz, NULL, true);
_leave(" = %p", cell);
return cell;
*/
struct afs_cell *afs_get_cell_maybe(struct afs_cell *cell)
{
- write_lock(&afs_cells_lock);
+ write_lock(&net->cells_lock);
if (cell && !list_empty(&cell->link))
afs_get_cell(cell);
else
cell = NULL;
- write_unlock(&afs_cells_lock);
+ write_unlock(&net->cells_lock);
return cell;
}
#endif /* 0 */
/* to prevent a race, the decrement and the dequeue must be effectively
* atomic */
- write_lock(&afs_cells_lock);
+ write_lock(&cell->net->cells_lock);
if (likely(!atomic_dec_and_test(&cell->usage))) {
- write_unlock(&afs_cells_lock);
+ write_unlock(&cell->net->cells_lock);
_leave("");
return;
}
ASSERT(list_empty(&cell->servers));
ASSERT(list_empty(&cell->vl_list));
- write_unlock(&afs_cells_lock);
+ wake_up(&cell->net->cells_freeable_wq);
- wake_up(&afs_cells_freeable_wq);
+ write_unlock(&cell->net->cells_lock);
_leave(" [unused]");
}
/*
* destroy a cell record
- * - must be called with the afs_cells_sem write-locked
+ * - must be called with the net->cells_sem write-locked
* - cell->link should have been broken by the caller
*/
-static void afs_cell_destroy(struct afs_cell *cell)
+static void afs_cell_destroy(struct afs_net *net, struct afs_cell *cell)
{
_enter("%p{%d,%s}", cell, atomic_read(&cell->usage), cell->name);
_debug("wait for cell %s", cell->name);
set_current_state(TASK_UNINTERRUPTIBLE);
- add_wait_queue(&afs_cells_freeable_wq, &myself);
+ add_wait_queue(&net->cells_freeable_wq, &myself);
while (atomic_read(&cell->usage) > 0) {
schedule();
set_current_state(TASK_UNINTERRUPTIBLE);
}
- remove_wait_queue(&afs_cells_freeable_wq, &myself);
+ remove_wait_queue(&net->cells_freeable_wq, &myself);
set_current_state(TASK_RUNNING);
}
ASSERT(list_empty(&cell->servers));
ASSERT(list_empty(&cell->vl_list));
- afs_proc_cell_remove(cell);
+ afs_proc_cell_remove(net, cell);
- down_write(&afs_proc_cells_sem);
+ down_write(&net->proc_cells_sem);
list_del_init(&cell->proc_link);
- up_write(&afs_proc_cells_sem);
+ up_write(&net->proc_cells_sem);
#ifdef CONFIG_AFS_FSCACHE
fscache_relinquish_cookie(cell->cache, 0);
* purge in-memory cell database on module unload or afs_init() failure
* - the timeout daemon is stopped before calling this
*/
-void afs_cell_purge(void)
+void afs_cell_purge(struct afs_net *net)
{
struct afs_cell *cell;
_enter("");
- afs_put_cell(afs_cell_root);
+ afs_put_cell(net->ws_cell);
- down_write(&afs_cells_sem);
+ down_write(&net->cells_sem);
- while (!list_empty(&afs_cells)) {
+ while (!list_empty(&net->cells)) {
cell = NULL;
/* remove the next cell from the front of the list */
- write_lock(&afs_cells_lock);
+ write_lock(&net->cells_lock);
- if (!list_empty(&afs_cells)) {
- cell = list_entry(afs_cells.next,
+ if (!list_empty(&net->cells)) {
+ cell = list_entry(net->cells.next,
struct afs_cell, link);
list_del_init(&cell->link);
}
- write_unlock(&afs_cells_lock);
+ write_unlock(&net->cells_lock);
if (cell) {
_debug("PURGING CELL %s (%d)",
cell->name, atomic_read(&cell->usage));
/* now the cell should be left with no references */
- afs_cell_destroy(cell);
+ afs_cell_destroy(net, cell);
}
}
- up_write(&afs_cells_sem);
+ up_write(&net->cells_sem);
_leave("");
}
switch (call->unmarshall) {
case 0:
- rxrpc_kernel_get_peer(afs_socket, call->rxcall, &srx);
+ rxrpc_kernel_get_peer(call->net->socket, call->rxcall, &srx);
call->offset = 0;
call->unmarshall++;
/* we'll need the file server record as that tells us which set of
* vnodes to operate upon */
- server = afs_find_server(&srx);
+ server = afs_find_server(call->net, &srx);
if (!server)
return -ENOTCONN;
call->server = server;
_enter("");
- rxrpc_kernel_get_peer(afs_socket, call->rxcall, &srx);
+ rxrpc_kernel_get_peer(call->net->socket, call->rxcall, &srx);
ret = afs_extract_data(call, NULL, 0, false);
if (ret < 0)
/* we'll need the file server record as that tells us which set of
* vnodes to operate upon */
- server = afs_find_server(&srx);
+ server = afs_find_server(call->net, &srx);
if (!server)
return -ENOTCONN;
call->server = server;
_enter("");
- rxrpc_kernel_get_peer(afs_socket, call->rxcall, &srx);
+ rxrpc_kernel_get_peer(call->net->socket, call->rxcall, &srx);
_enter("{%u}", call->unmarshall);
/* we'll need the file server record as that tells us which set of
* vnodes to operate upon */
- server = afs_find_server(&srx);
+ server = afs_find_server(call->net, &srx);
if (!server)
return -ENOTCONN;
call->server = server;
_enter("");
- if (memcmp(r, &afs_uuid, sizeof(afs_uuid)) == 0)
+ if (memcmp(r, &call->net->uuid, sizeof(call->net->uuid)) == 0)
reply.match = htonl(0);
else
reply.match = htonl(1);
memset(&reply, 0, sizeof(reply));
reply.ia.nifs = htonl(nifs);
- reply.ia.uuid[0] = afs_uuid.time_low;
- reply.ia.uuid[1] = htonl(ntohs(afs_uuid.time_mid));
- reply.ia.uuid[2] = htonl(ntohs(afs_uuid.time_hi_and_version));
- reply.ia.uuid[3] = htonl((s8) afs_uuid.clock_seq_hi_and_reserved);
- reply.ia.uuid[4] = htonl((s8) afs_uuid.clock_seq_low);
+ reply.ia.uuid[0] = call->net->uuid.time_low;
+ reply.ia.uuid[1] = htonl(ntohs(call->net->uuid.time_mid));
+ reply.ia.uuid[2] = htonl(ntohs(call->net->uuid.time_hi_and_version));
+ reply.ia.uuid[3] = htonl((s8) call->net->uuid.clock_seq_hi_and_reserved);
+ reply.ia.uuid[4] = htonl((s8) call->net->uuid.clock_seq_low);
for (loop = 0; loop < 6; loop++)
- reply.ia.uuid[loop + 5] = htonl((s8) afs_uuid.node[loop]);
+ reply.ia.uuid[loop + 5] = htonl((s8) call->net->uuid.node[loop]);
if (ifs) {
for (loop = 0; loop < nifs; loop++) {
#define AFS_LOCK_GRANTED 0
#define AFS_LOCK_PENDING 1
+struct workqueue_struct *afs_lock_manager;
+
static void afs_fl_copy_lock(struct file_lock *new, struct file_lock *fl);
static void afs_fl_release_private(struct file_lock *fl);
-static struct workqueue_struct *afs_lock_manager;
-static DEFINE_MUTEX(afs_lock_manager_mutex);
-
static const struct file_lock_operations afs_lock_ops = {
.fl_copy_lock = afs_fl_copy_lock,
.fl_release_private = afs_fl_release_private,
};
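/*
 * [Editorial note] Lazy creation of the kafs_lockd workqueue is
 * retired: afs_lock_manager is now allocated unconditionally by
 * afs_init() in main.c below, so the afs_init_lock_manager() /
 * afs_kill_lock_manager() pair and the setup call in the lock path
 * can all be deleted here.
 */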
/*
- * initialise the lock manager thread if it isn't already running
- */
-static int afs_init_lock_manager(void)
-{
- int ret;
-
- ret = 0;
- if (!afs_lock_manager) {
- mutex_lock(&afs_lock_manager_mutex);
- if (!afs_lock_manager) {
- afs_lock_manager = alloc_workqueue("kafs_lockd",
- WQ_MEM_RECLAIM, 0);
- if (!afs_lock_manager)
- ret = -ENOMEM;
- }
- mutex_unlock(&afs_lock_manager_mutex);
- }
- return ret;
-}
-
-/*
- * destroy the lock manager thread if it's running
- */
-void __exit afs_kill_lock_manager(void)
-{
- if (afs_lock_manager)
- destroy_workqueue(afs_lock_manager);
-}
-
-/*
* if the callback is broken on this vnode, then the lock may now be available
*/
void afs_lock_may_be_available(struct afs_vnode *vnode)
if (fl->fl_start != 0 || fl->fl_end != OFFSET_MAX)
return -EINVAL;
- ret = afs_init_lock_manager();
- if (ret < 0)
- return ret;
-
fl->fl_ops = &afs_lock_ops;
INIT_LIST_HEAD(&fl->fl_u.afs.link);
fl->fl_u.afs.state = AFS_LOCK_PENDING;
bool async)
{
struct afs_call *call;
+ struct afs_net *net = afs_v2net(vnode);
__be32 *bp;
_enter(",%x,{%x:%u},,",
key_serial(key), vnode->fid.vid, vnode->fid.vnode);
- call = afs_alloc_flat_call(&afs_RXFSFetchStatus, 16, (21 + 3 + 6) * 4);
+ call = afs_alloc_flat_call(net, &afs_RXFSFetchStatus, 16, (21 + 3 + 6) * 4);
if (!call)
return -ENOMEM;
bool async)
{
struct afs_call *call;
+ struct afs_net *net = afs_v2net(vnode);
__be32 *bp;
_enter("");
- call = afs_alloc_flat_call(&afs_RXFSFetchData64, 32, (21 + 3 + 6) * 4);
+ call = afs_alloc_flat_call(net, &afs_RXFSFetchData64, 32, (21 + 3 + 6) * 4);
if (!call)
return -ENOMEM;
bool async)
{
struct afs_call *call;
+ struct afs_net *net = afs_v2net(vnode);
__be32 *bp;
if (upper_32_bits(req->pos) ||
_enter("");
- call = afs_alloc_flat_call(&afs_RXFSFetchData, 24, (21 + 3 + 6) * 4);
+ call = afs_alloc_flat_call(net, &afs_RXFSFetchData, 24, (21 + 3 + 6) * 4);
if (!call)
return -ENOMEM;
* give up a set of callbacks
* - the callbacks are held in the server->cb_break ring
*/
-int afs_fs_give_up_callbacks(struct afs_server *server,
+int afs_fs_give_up_callbacks(struct afs_net *net,
+ struct afs_server *server,
bool async)
{
struct afs_call *call;
_debug("break %zu callbacks", ncallbacks);
- call = afs_alloc_flat_call(&afs_RXFSGiveUpCallBacks,
+ call = afs_alloc_flat_call(net, &afs_RXFSGiveUpCallBacks,
12 + ncallbacks * 6 * 4, 0);
if (!call)
return -ENOMEM;
bool async)
{
struct afs_call *call;
+ struct afs_net *net = afs_v2net(vnode);
size_t namesz, reqsz, padsz;
__be32 *bp;
padsz = (4 - (namesz & 3)) & 3;
reqsz = (5 * 4) + namesz + padsz + (6 * 4);
- call = afs_alloc_flat_call(&afs_RXFSCreateXXXX, reqsz,
+ call = afs_alloc_flat_call(net, &afs_RXFSCreateXXXX, reqsz,
(3 + 21 + 21 + 3 + 6) * 4);
if (!call)
return -ENOMEM;
bool async)
{
struct afs_call *call;
+ struct afs_net *net = afs_v2net(vnode);
size_t namesz, reqsz, padsz;
__be32 *bp;
padsz = (4 - (namesz & 3)) & 3;
reqsz = (5 * 4) + namesz + padsz;
- call = afs_alloc_flat_call(&afs_RXFSRemoveXXXX, reqsz, (21 + 6) * 4);
+ call = afs_alloc_flat_call(net, &afs_RXFSRemoveXXXX, reqsz, (21 + 6) * 4);
if (!call)
return -ENOMEM;
bool async)
{
struct afs_call *call;
+ struct afs_net *net = afs_v2net(vnode);
size_t namesz, reqsz, padsz;
__be32 *bp;
padsz = (4 - (namesz & 3)) & 3;
reqsz = (5 * 4) + namesz + padsz + (3 * 4);
- call = afs_alloc_flat_call(&afs_RXFSLink, reqsz, (21 + 21 + 6) * 4);
+ call = afs_alloc_flat_call(net, &afs_RXFSLink, reqsz, (21 + 21 + 6) * 4);
if (!call)
return -ENOMEM;
bool async)
{
struct afs_call *call;
+ struct afs_net *net = afs_v2net(vnode);
size_t namesz, reqsz, padsz, c_namesz, c_padsz;
__be32 *bp;
reqsz = (6 * 4) + namesz + padsz + c_namesz + c_padsz + (6 * 4);
- call = afs_alloc_flat_call(&afs_RXFSSymlink, reqsz,
+ call = afs_alloc_flat_call(net, &afs_RXFSSymlink, reqsz,
(3 + 21 + 21 + 6) * 4);
if (!call)
return -ENOMEM;
bool async)
{
struct afs_call *call;
+ struct afs_net *net = afs_v2net(orig_dvnode);
size_t reqsz, o_namesz, o_padsz, n_namesz, n_padsz;
__be32 *bp;
(3 * 4) +
4 + n_namesz + n_padsz;
- call = afs_alloc_flat_call(&afs_RXFSRename, reqsz, (21 + 21 + 6) * 4);
+ call = afs_alloc_flat_call(net, &afs_RXFSRename, reqsz, (21 + 21 + 6) * 4);
if (!call)
return -ENOMEM;
{
struct afs_vnode *vnode = wb->vnode;
struct afs_call *call;
+ struct afs_net *net = afs_v2net(vnode);
__be32 *bp;
_enter(",%x,{%x:%u},,",
key_serial(wb->key), vnode->fid.vid, vnode->fid.vnode);
- call = afs_alloc_flat_call(&afs_RXFSStoreData64,
+ call = afs_alloc_flat_call(net, &afs_RXFSStoreData64,
(4 + 6 + 3 * 2) * 4,
(21 + 6) * 4);
if (!call)
{
struct afs_vnode *vnode = wb->vnode;
struct afs_call *call;
+ struct afs_net *net = afs_v2net(vnode);
loff_t size, pos, i_size;
__be32 *bp;
return afs_fs_store_data64(server, wb, first, last, offset, to,
size, pos, i_size, async);
- call = afs_alloc_flat_call(&afs_RXFSStoreData,
+ call = afs_alloc_flat_call(net, &afs_RXFSStoreData,
(4 + 6 + 3) * 4,
(21 + 6) * 4);
if (!call)
bool async)
{
struct afs_call *call;
+ struct afs_net *net = afs_v2net(vnode);
__be32 *bp;
_enter(",%x,{%x:%u},,",
ASSERT(attr->ia_valid & ATTR_SIZE);
- call = afs_alloc_flat_call(&afs_RXFSStoreData64_as_Status,
+ call = afs_alloc_flat_call(net, &afs_RXFSStoreData64_as_Status,
(4 + 6 + 3 * 2) * 4,
(21 + 6) * 4);
if (!call)
bool async)
{
struct afs_call *call;
+ struct afs_net *net = afs_v2net(vnode);
__be32 *bp;
_enter(",%x,{%x:%u},,",
return afs_fs_setattr_size64(server, key, vnode, attr,
async);
- call = afs_alloc_flat_call(&afs_RXFSStoreData_as_Status,
+ call = afs_alloc_flat_call(net, &afs_RXFSStoreData_as_Status,
(4 + 6 + 3) * 4,
(21 + 6) * 4);
if (!call)
bool async)
{
struct afs_call *call;
+ struct afs_net *net = afs_v2net(vnode);
__be32 *bp;
if (attr->ia_valid & ATTR_SIZE)
_enter(",%x,{%x:%u},,",
key_serial(key), vnode->fid.vid, vnode->fid.vnode);
- call = afs_alloc_flat_call(&afs_RXFSStoreStatus,
+ call = afs_alloc_flat_call(net, &afs_RXFSStoreStatus,
(4 + 6) * 4,
(21 + 6) * 4);
if (!call)
bool async)
{
struct afs_call *call;
+ struct afs_net *net = afs_v2net(vnode);
__be32 *bp;
void *tmpbuf;
if (!tmpbuf)
return -ENOMEM;
- call = afs_alloc_flat_call(&afs_RXFSGetVolumeStatus, 2 * 4, 12 * 4);
+ call = afs_alloc_flat_call(net, &afs_RXFSGetVolumeStatus, 2 * 4, 12 * 4);
if (!call) {
kfree(tmpbuf);
return -ENOMEM;
bool async)
{
struct afs_call *call;
+ struct afs_net *net = afs_v2net(vnode);
__be32 *bp;
_enter("");
- call = afs_alloc_flat_call(&afs_RXFSSetLock, 5 * 4, 6 * 4);
+ call = afs_alloc_flat_call(net, &afs_RXFSSetLock, 5 * 4, 6 * 4);
if (!call)
return -ENOMEM;
bool async)
{
struct afs_call *call;
+ struct afs_net *net = afs_v2net(vnode);
__be32 *bp;
_enter("");
- call = afs_alloc_flat_call(&afs_RXFSExtendLock, 4 * 4, 6 * 4);
+ call = afs_alloc_flat_call(net, &afs_RXFSExtendLock, 4 * 4, 6 * 4);
if (!call)
return -ENOMEM;
bool async)
{
struct afs_call *call;
+ struct afs_net *net = afs_v2net(vnode);
__be32 *bp;
_enter("");
- call = afs_alloc_flat_call(&afs_RXFSReleaseLock, 4 * 4, 6 * 4);
+ call = afs_alloc_flat_call(net, &afs_RXFSReleaseLock, 4 * 4, 6 * 4);
if (!call)
return -ENOMEM;
#include <linux/fscache.h>
#include <linux/backing-dev.h>
#include <linux/uuid.h>
+#include <net/net_namespace.h>
#include <net/af_rxrpc.h>
#include "afs.h"
afs_voltype_t type; /* type of volume requested */
int volnamesz; /* size of volume name */
const char *volname; /* name of volume to mount */
+ struct afs_net *net; /* Network namespace in effect */
struct afs_cell *cell; /* cell in which to find volume */
struct afs_volume *volume; /* volume record */
struct key *key; /* key to use for secure mounting */
AFS_CALL_AWAIT_ACK, /* awaiting final ACK of incoming call */
AFS_CALL_COMPLETE, /* Completed or failed */
};
+
/*
* a record of an in-progress RxRPC call
*/
struct work_struct work; /* actual work processor */
struct rxrpc_call *rxcall; /* RxRPC call handle */
struct key *key; /* security for this call */
+ struct afs_net *net; /* The network namespace */
struct afs_server *server; /* server affected by incoming CM call */
void *request; /* request data (first part) */
struct address_space *mapping; /* page set */
* - there's one superblock per volume
*/
struct afs_super_info {
+ struct afs_net *net; /* Network namespace */
struct afs_volume *volume; /* volume record */
char rwparent; /* T if parent is R/W AFS volume */
};
};
/*
+ * AFS network namespace record.
+ */
+struct afs_net {
+ struct afs_uuid uuid;
+ bool live; /* F if this namespace is being removed */
+
+ /* AF_RXRPC I/O stuff */
+ struct socket *socket;
+ struct afs_call *spare_incoming_call;
+ struct work_struct charge_preallocation_work;
+ struct mutex socket_mutex;
+ atomic_t nr_outstanding_calls;
+ atomic_t nr_superblocks;
+
+ /* Cell database */
+ struct list_head cells;
+ struct afs_cell *ws_cell;
+ rwlock_t cells_lock;
+ struct rw_semaphore cells_sem;
+ wait_queue_head_t cells_freeable_wq;
+
+ struct rw_semaphore proc_cells_sem;
+ struct list_head proc_cells;
+
+ /* Volume location database */
+ struct list_head vl_updates; /* VL records in need-update order */
+ struct list_head vl_graveyard; /* Inactive VL records */
+ struct delayed_work vl_reaper;
+ struct delayed_work vl_updater;
+ spinlock_t vl_updates_lock;
+ spinlock_t vl_graveyard_lock;
+
+ /* File locking renewal management */
+ struct mutex lock_manager_mutex;
+
+ /* Server database */
+ struct rb_root servers; /* Active servers */
+ rwlock_t servers_lock;
+ struct list_head server_graveyard; /* Inactive server LRU list */
+ spinlock_t server_graveyard_lock;
+ struct delayed_work server_reaper;
+
+ /* Misc */
+ struct proc_dir_entry *proc_afs; /* /proc/net/afs directory */
+};
+
+extern struct afs_net __afs_net;	// Dummy AFS network namespace; TODO: replace with real netns
+
+/*
* AFS cell record
*/
struct afs_cell {
atomic_t usage;
struct list_head link; /* main cell list link */
+ struct afs_net *net; /* The network namespace */
struct key *anonymous_key; /* anonymous user key for this cell */
struct list_head proc_link; /* /proc cell list link */
#ifdef CONFIG_AFS_FSCACHE
unsigned mtu; /* MTU of interface */
};
-struct afs_uuid {
- __be32 time_low; /* low part of timestamp */
- __be16 time_mid; /* mid part of timestamp */
- __be16 time_hi_and_version; /* high part of timestamp and version */
- __u8 clock_seq_hi_and_reserved; /* clock seq hi and variant */
- __u8 clock_seq_low; /* clock seq low */
- __u8 node[6]; /* spatially unique node ID (MAC addr) */
-};
-
/*****************************************************************************/
/*
* cache.c
/*
* callback.c
*/
+extern struct workqueue_struct *afs_callback_update_worker;
+
extern void afs_init_callback_state(struct afs_server *);
extern void afs_broken_callback_work(struct work_struct *);
extern void afs_break_callbacks(struct afs_server *, size_t,
extern void afs_give_up_callback(struct afs_vnode *);
extern void afs_dispatch_give_up_callbacks(struct work_struct *);
extern void afs_flush_callback_breaks(struct afs_server *);
-extern int __init afs_callback_update_init(void);
-extern void afs_callback_update_kill(void);
/*
* cell.c
*/
-extern struct rw_semaphore afs_proc_cells_sem;
-extern struct list_head afs_proc_cells;
-
#define afs_get_cell(C) do { atomic_inc(&(C)->usage); } while(0)
-extern int afs_cell_init(char *);
-extern struct afs_cell *afs_cell_create(const char *, unsigned, char *, bool);
-extern struct afs_cell *afs_cell_lookup(const char *, unsigned, bool);
+extern int afs_cell_init(struct afs_net *, char *);
+extern struct afs_cell *afs_cell_create(struct afs_net *, const char *, unsigned, char *, bool);
+extern struct afs_cell *afs_cell_lookup(struct afs_net *, const char *, unsigned, bool);
extern struct afs_cell *afs_grab_cell(struct afs_cell *);
extern void afs_put_cell(struct afs_cell *);
-extern void afs_cell_purge(void);
+extern void __net_exit afs_cell_purge(struct afs_net *);
/*
* cmservice.c
/*
* flock.c
*/
-extern void __exit afs_kill_lock_manager(void);
+extern struct workqueue_struct *afs_lock_manager;
+
extern void afs_lock_work(struct work_struct *);
extern void afs_lock_may_be_available(struct afs_vnode *);
extern int afs_lock(struct file *, int, struct file_lock *);
extern int afs_fs_fetch_file_status(struct afs_server *, struct key *,
struct afs_vnode *, struct afs_volsync *,
bool);
-extern int afs_fs_give_up_callbacks(struct afs_server *, bool);
+extern int afs_fs_give_up_callbacks(struct afs_net *, struct afs_server *, bool);
extern int afs_fs_fetch_data(struct afs_server *, struct key *,
struct afs_vnode *, struct afs_read *, bool);
extern int afs_fs_create(struct afs_server *, struct key *,
* main.c
*/
extern struct workqueue_struct *afs_wq;
-extern struct afs_uuid afs_uuid;
+
+static inline struct afs_net *afs_d2net(struct dentry *dentry)
+{
+ return &__afs_net;
+}
+
+static inline struct afs_net *afs_i2net(struct inode *inode)
+{
+ return &__afs_net;
+}
+
+static inline struct afs_net *afs_v2net(struct afs_vnode *vnode)
+{
+ return &__afs_net;
+}
+
+static inline struct afs_net *afs_sock2net(struct sock *sk)
+{
+ return &__afs_net;
+}
+
+static inline struct afs_net *afs_get_net(struct afs_net *net)
+{
+ return net;
+}
+
+static inline void afs_put_net(struct afs_net *net)
+{
+}
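/*
 * [Editorial sketch, not part of this patch] The helpers above are
 * deliberate stubs that always resolve to the dummy &__afs_net.  If a
 * later patch registered a pernet subsystem with an allocated
 * afs_net_id (hypothetical name), they could use the standard pernet
 * machinery instead, e.g.:
 *
 *     #include <net/netns/generic.h>
 *
 *     static inline struct afs_net *afs_sock2net(struct sock *sk)
 *     {
 *         return net_generic(sock_net(sk), afs_net_id);
 *     }
 */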
/*
* misc.c
/*
* proc.c
*/
-extern int afs_proc_init(void);
-extern void afs_proc_cleanup(void);
-extern int afs_proc_cell_setup(struct afs_cell *);
-extern void afs_proc_cell_remove(struct afs_cell *);
+extern int __net_init afs_proc_init(struct afs_net *);
+extern void __net_exit afs_proc_cleanup(struct afs_net *);
+extern int afs_proc_cell_setup(struct afs_net *, struct afs_cell *);
+extern void afs_proc_cell_remove(struct afs_net *, struct afs_cell *);
/*
* rxrpc.c
*/
-extern struct socket *afs_socket;
-extern atomic_t afs_outstanding_calls;
+extern struct workqueue_struct *afs_async_calls;
-extern int afs_open_socket(void);
-extern void afs_close_socket(void);
+extern int __net_init afs_open_socket(struct afs_net *);
+extern void __net_exit afs_close_socket(struct afs_net *);
+extern void afs_charge_preallocation(struct work_struct *);
extern void afs_put_call(struct afs_call *);
extern int afs_queue_call_work(struct afs_call *);
extern int afs_make_call(struct in_addr *, struct afs_call *, gfp_t, bool);
-extern struct afs_call *afs_alloc_flat_call(const struct afs_call_type *,
+extern struct afs_call *afs_alloc_flat_call(struct afs_net *,
+ const struct afs_call_type *,
size_t, size_t);
extern void afs_flat_call_destructor(struct afs_call *);
extern void afs_send_empty_reply(struct afs_call *);
extern struct afs_server *afs_lookup_server(struct afs_cell *,
const struct in_addr *);
-extern struct afs_server *afs_find_server(const struct sockaddr_rxrpc *);
+extern struct afs_server *afs_find_server(struct afs_net *,
+ const struct sockaddr_rxrpc *);
extern void afs_put_server(struct afs_server *);
-extern void __exit afs_purge_servers(void);
+extern void afs_reap_server(struct work_struct *);
+extern void __net_exit afs_purge_servers(struct afs_net *);
/*
* super.c
*/
-extern int afs_fs_init(void);
-extern void afs_fs_exit(void);
+extern int __init afs_fs_init(void);
+extern void __exit afs_fs_exit(void);
/*
* vlclient.c
*/
-extern int afs_vl_get_entry_by_name(struct in_addr *, struct key *,
+extern int afs_vl_get_entry_by_name(struct afs_net *,
+ struct in_addr *, struct key *,
const char *, struct afs_cache_vlocation *,
bool);
-extern int afs_vl_get_entry_by_id(struct in_addr *, struct key *,
+extern int afs_vl_get_entry_by_id(struct afs_net *,
+ struct in_addr *, struct key *,
afs_volid_t, afs_voltype_t,
struct afs_cache_vlocation *, bool);
/*
* vlocation.c
*/
+extern struct workqueue_struct *afs_vlocation_update_worker;
+
#define afs_get_vlocation(V) do { atomic_inc(&(V)->usage); } while(0)
-extern int __init afs_vlocation_update_init(void);
-extern struct afs_vlocation *afs_vlocation_lookup(struct afs_cell *,
+extern struct afs_vlocation *afs_vlocation_lookup(struct afs_net *,
+ struct afs_cell *,
struct key *,
const char *, size_t);
-extern void afs_put_vlocation(struct afs_vlocation *);
-extern void afs_vlocation_purge(void);
+extern void afs_put_vlocation(struct afs_net *, struct afs_vlocation *);
+extern void afs_vlocation_updater(struct work_struct *);
+extern void afs_vlocation_reaper(struct work_struct *);
+extern void __net_exit afs_vlocation_purge(struct afs_net *);
/*
* vnode.c
*/
#define afs_get_volume(V) do { atomic_inc(&(V)->usage); } while(0)
-extern void afs_put_volume(struct afs_volume *);
+extern void afs_put_volume(struct afs_net *, struct afs_volume *);
extern struct afs_volume *afs_volume_lookup(struct afs_mount_params *);
extern struct afs_server *afs_volume_pick_fileserver(struct afs_vnode *);
extern int afs_volume_release_fileserver(struct afs_vnode *,
module_param(rootcell, charp, 0);
MODULE_PARM_DESC(rootcell, "root AFS cell name and VL server IP addr list");
-struct afs_uuid afs_uuid;
struct workqueue_struct *afs_wq;
+struct afs_net __afs_net;
+
+/*
+ * Initialise an AFS network namespace record.
+ */
+static int __net_init afs_net_init(struct afs_net *net)
+{
+ int ret;
+
+ net->live = true;
+ generate_random_uuid((unsigned char *)&net->uuid);
+
+ INIT_WORK(&net->charge_preallocation_work, afs_charge_preallocation);
+ mutex_init(&net->socket_mutex);
+ INIT_LIST_HEAD(&net->cells);
+ rwlock_init(&net->cells_lock);
+ init_rwsem(&net->cells_sem);
+ init_waitqueue_head(&net->cells_freeable_wq);
+ init_rwsem(&net->proc_cells_sem);
+ INIT_LIST_HEAD(&net->proc_cells);
+ INIT_LIST_HEAD(&net->vl_updates);
+ INIT_LIST_HEAD(&net->vl_graveyard);
+ INIT_DELAYED_WORK(&net->vl_reaper, afs_vlocation_reaper);
+ INIT_DELAYED_WORK(&net->vl_updater, afs_vlocation_updater);
+ spin_lock_init(&net->vl_updates_lock);
+ spin_lock_init(&net->vl_graveyard_lock);
+ net->servers = RB_ROOT;
+ rwlock_init(&net->servers_lock);
+ INIT_LIST_HEAD(&net->server_graveyard);
+ spin_lock_init(&net->server_graveyard_lock);
+ INIT_DELAYED_WORK(&net->server_reaper, afs_reap_server);
+
+ /* Register the /proc stuff */
+ ret = afs_proc_init(net);
+ if (ret < 0)
+ goto error_proc;
+
+ /* Initialise the cell DB */
+ ret = afs_cell_init(net, rootcell);
+ if (ret < 0)
+ goto error_cell_init;
+
+ /* Create the RxRPC transport */
+ ret = afs_open_socket(net);
+ if (ret < 0)
+ goto error_open_socket;
+
+ return 0;
+
+error_open_socket:
+ afs_vlocation_purge(net);
+ afs_cell_purge(net);
+error_cell_init:
+ afs_proc_cleanup(net);
+error_proc:
+ return ret;
+}
+
+/*
+ * Clean up and destroy an AFS network namespace record.
+ */
+static void __net_exit afs_net_exit(struct afs_net *net)
+{
+ net->live = false;
+ afs_close_socket(net);
+ afs_purge_servers(net);
+ afs_vlocation_purge(net);
+ afs_cell_purge(net);
+ afs_proc_cleanup(net);
+}
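/*
 * [Editorial sketch, not part of this patch] afs_net_init() and
 * afs_net_exit() are already shaped like pernet hooks even though
 * afs_init()/afs_exit() below call them directly on __afs_net.  Real
 * namespace support might wire them up roughly as follows, where
 * afs_pernet_init/afs_pernet_exit and afs_net_id are hypothetical
 * wrappers and identifiers:
 *
 *     static struct pernet_operations afs_net_ops = {
 *         .init = afs_pernet_init,  // calls afs_net_init()
 *         .exit = afs_pernet_exit,  // calls afs_net_exit()
 *         .id   = &afs_net_id,
 *         .size = sizeof(struct afs_net),
 *     };
 *
 *     ret = register_pernet_subsys(&afs_net_ops);
 */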
/*
* initialise the AFS client FS module
*/
static int __init afs_init(void)
{
- int ret;
+ int ret = -ENOMEM;
printk(KERN_INFO "kAFS: Red Hat AFS client v0.1 registering.\n");
- generate_random_uuid((unsigned char *)&afs_uuid);
-
- /* create workqueue */
- ret = -ENOMEM;
afs_wq = alloc_workqueue("afs", 0, 0);
if (!afs_wq)
- return ret;
-
- /* register the /proc stuff */
- ret = afs_proc_init();
- if (ret < 0)
- goto error_proc;
+ goto error_afs_wq;
+ afs_async_calls = alloc_workqueue("kafsd", WQ_MEM_RECLAIM, 0);
+ if (!afs_async_calls)
+ goto error_async;
+ afs_vlocation_update_worker =
+ alloc_workqueue("kafs_vlupdated", WQ_MEM_RECLAIM, 0);
+ if (!afs_vlocation_update_worker)
+ goto error_vl_up;
+ afs_callback_update_worker =
+ alloc_ordered_workqueue("kafs_callbackd", WQ_MEM_RECLAIM);
+ if (!afs_callback_update_worker)
+ goto error_callback;
+ afs_lock_manager = alloc_workqueue("kafs_lockd", WQ_MEM_RECLAIM, 0);
+ if (!afs_lock_manager)
+ goto error_lockmgr;
#ifdef CONFIG_AFS_FSCACHE
/* we want to be able to cache */
goto error_cache;
#endif
- /* initialise the cell DB */
- ret = afs_cell_init(rootcell);
- if (ret < 0)
- goto error_cell_init;
-
- /* initialise the VL update process */
- ret = afs_vlocation_update_init();
- if (ret < 0)
- goto error_vl_update_init;
-
- /* initialise the callback update process */
- ret = afs_callback_update_init();
+ ret = afs_net_init(&__afs_net);
if (ret < 0)
- goto error_callback_update_init;
-
- /* create the RxRPC transport */
- ret = afs_open_socket();
- if (ret < 0)
- goto error_open_socket;
+ goto error_net;
/* register the filesystems */
ret = afs_fs_init();
return ret;
error_fs:
- afs_close_socket();
-error_open_socket:
- afs_callback_update_kill();
-error_callback_update_init:
- afs_vlocation_purge();
-error_vl_update_init:
- afs_cell_purge();
-error_cell_init:
+ afs_net_exit(&__afs_net);
+error_net:
#ifdef CONFIG_AFS_FSCACHE
fscache_unregister_netfs(&afs_cache_netfs);
error_cache:
#endif
- afs_proc_cleanup();
-error_proc:
+ destroy_workqueue(afs_lock_manager);
+error_lockmgr:
+ destroy_workqueue(afs_callback_update_worker);
+error_callback:
+ destroy_workqueue(afs_vlocation_update_worker);
+error_vl_up:
+ destroy_workqueue(afs_async_calls);
+error_async:
destroy_workqueue(afs_wq);
+error_afs_wq:
rcu_barrier();
printk(KERN_ERR "kAFS: failed to register: %d\n", ret);
return ret;
printk(KERN_INFO "kAFS: Red Hat AFS client v0.1 unregistering.\n");
afs_fs_exit();
- afs_kill_lock_manager();
- afs_close_socket();
- afs_purge_servers();
- afs_callback_update_kill();
- afs_vlocation_purge();
- destroy_workqueue(afs_wq);
- afs_cell_purge();
+ afs_net_exit(&__afs_net);
#ifdef CONFIG_AFS_FSCACHE
fscache_unregister_netfs(&afs_cache_netfs);
#endif
- afs_proc_cleanup();
+ destroy_workqueue(afs_lock_manager);
+ destroy_workqueue(afs_callback_update_worker);
+ destroy_workqueue(afs_vlocation_update_worker);
+ destroy_workqueue(afs_async_calls);
+ destroy_workqueue(afs_wq);
rcu_barrier();
}
#include <linux/uaccess.h>
#include "internal.h"
-static struct proc_dir_entry *proc_afs;
+static inline struct afs_net *afs_proc2net(struct file *f)
+{
+ return &__afs_net;
+}
+
+static inline struct afs_net *afs_seq2net(struct seq_file *m)
+{
+ return &__afs_net; // TODO: use seq_file_net(m)
+}
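/*
 * [Editorial note] The TODO above refers to seq_file_net(), which
 * recovers the struct net a seq_file was opened against when the proc
 * files are created with the per-net proc helpers; until the
 * /proc/fs/afs entries are made per-namespace, both helpers can only
 * hand back the dummy __afs_net.
 */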
static int afs_proc_cells_open(struct inode *inode, struct file *file);
static void *afs_proc_cells_start(struct seq_file *p, loff_t *pos);
/*
* initialise the /proc/fs/afs/ directory
*/
-int afs_proc_init(void)
+int afs_proc_init(struct afs_net *net)
{
_enter("");
- proc_afs = proc_mkdir("fs/afs", NULL);
- if (!proc_afs)
+ net->proc_afs = proc_mkdir("fs/afs", NULL);
+ if (!net->proc_afs)
goto error_dir;
- if (!proc_create("cells", 0644, proc_afs, &afs_proc_cells_fops) ||
- !proc_create("rootcell", 0644, proc_afs, &afs_proc_rootcell_fops))
+ if (!proc_create("cells", 0644, net->proc_afs, &afs_proc_cells_fops) ||
+ !proc_create("rootcell", 0644, net->proc_afs, &afs_proc_rootcell_fops))
goto error_tree;
_leave(" = 0");
return 0;
error_tree:
- remove_proc_subtree("fs/afs", NULL);
+ proc_remove(net->proc_afs);
error_dir:
_leave(" = -ENOMEM");
return -ENOMEM;
/*
* clean up the /proc/fs/afs/ directory
*/
-void afs_proc_cleanup(void)
+void afs_proc_cleanup(struct afs_net *net)
{
- remove_proc_subtree("fs/afs", NULL);
+ proc_remove(net->proc_afs);
+ net->proc_afs = NULL;
}
/*
*/
static void *afs_proc_cells_start(struct seq_file *m, loff_t *_pos)
{
- /* lock the list against modification */
- down_read(&afs_proc_cells_sem);
- return seq_list_start_head(&afs_proc_cells, *_pos);
+ struct afs_net *net = afs_seq2net(m);
+
+ down_read(&net->proc_cells_sem);
+ return seq_list_start_head(&net->proc_cells, *_pos);
}
/*
* move to next cell in cells list
*/
-static void *afs_proc_cells_next(struct seq_file *p, void *v, loff_t *pos)
+static void *afs_proc_cells_next(struct seq_file *m, void *v, loff_t *pos)
{
- return seq_list_next(v, &afs_proc_cells, pos);
+ struct afs_net *net = afs_seq2net(m);
+
+ return seq_list_next(v, &net->proc_cells, pos);
}
/*
* clean up after reading from the cells list
*/
-static void afs_proc_cells_stop(struct seq_file *p, void *v)
+static void afs_proc_cells_stop(struct seq_file *m, void *v)
{
- up_read(&afs_proc_cells_sem);
+ struct afs_net *net = afs_seq2net(m);
+
+ up_read(&net->proc_cells_sem);
}
/*
static int afs_proc_cells_show(struct seq_file *m, void *v)
{
struct afs_cell *cell = list_entry(v, struct afs_cell, proc_link);
+ struct afs_net *net = afs_seq2net(m);
- if (v == &afs_proc_cells) {
+ if (v == &net->proc_cells) {
/* display header on line 1 */
seq_puts(m, "USE NAME\n");
return 0;
static ssize_t afs_proc_cells_write(struct file *file, const char __user *buf,
size_t size, loff_t *_pos)
{
+ struct afs_net *net = afs_proc2net(file);
char *kbuf, *name, *args;
int ret;
if (strcmp(kbuf, "add") == 0) {
struct afs_cell *cell;
- cell = afs_cell_create(name, strlen(name), args, false);
+ cell = afs_cell_create(net, name, strlen(name), args, false);
if (IS_ERR(cell)) {
ret = PTR_ERR(cell);
goto done;
const char __user *buf,
size_t size, loff_t *_pos)
{
+ struct afs_net *net = afs_proc2net(file);
char *kbuf, *s;
int ret;
/* determine command to perform */
_debug("rootcell=%s", kbuf);
- ret = afs_cell_init(kbuf);
+ ret = afs_cell_init(net, kbuf);
if (ret >= 0)
ret = size; /* consume everything, always */
/*
* initialise /proc/fs/afs/<cell>/
*/
-int afs_proc_cell_setup(struct afs_cell *cell)
+int afs_proc_cell_setup(struct afs_net *net, struct afs_cell *cell)
{
struct proc_dir_entry *dir;
_enter("%p{%s}", cell, cell->name);
- dir = proc_mkdir(cell->name, proc_afs);
+ dir = proc_mkdir(cell->name, net->proc_afs);
if (!dir)
goto error_dir;
return 0;
error_tree:
- remove_proc_subtree(cell->name, proc_afs);
+ remove_proc_subtree(cell->name, net->proc_afs);
error_dir:
_leave(" = -ENOMEM");
return -ENOMEM;
/*
* remove /proc/fs/afs/<cell>/
*/
-void afs_proc_cell_remove(struct afs_cell *cell)
+void afs_proc_cell_remove(struct afs_net *net, struct afs_cell *cell)
{
_enter("");
- remove_proc_subtree(cell->name, proc_afs);
+ remove_proc_subtree(cell->name, net->proc_afs);
_leave("");
}
#include "internal.h"
#include "afs_cm.h"
-struct socket *afs_socket; /* my RxRPC socket */
-static struct workqueue_struct *afs_async_calls;
-static struct afs_call *afs_spare_incoming_call;
-atomic_t afs_outstanding_calls;
+struct workqueue_struct *afs_async_calls;
static void afs_wake_up_call_waiter(struct sock *, struct rxrpc_call *, unsigned long);
static int afs_wait_for_call_to_complete(struct afs_call *);
.abort_to_error = afs_abort_to_error,
};
-static void afs_charge_preallocation(struct work_struct *);
-
-static DECLARE_WORK(afs_charge_preallocation_work, afs_charge_preallocation);
-
/*
* open an RxRPC socket and bind it to be a server for callback notifications
* - the socket is left in blocking mode and non-blocking ops use MSG_DONTWAIT
*/
-int afs_open_socket(void)
+int afs_open_socket(struct afs_net *net)
{
struct sockaddr_rxrpc srx;
struct socket *socket;
_enter("");
- ret = -ENOMEM;
- afs_async_calls = alloc_workqueue("kafsd", WQ_MEM_RECLAIM, 0);
- if (!afs_async_calls)
- goto error_0;
-
ret = sock_create_kern(&init_net, AF_RXRPC, SOCK_DGRAM, PF_INET, &socket);
if (ret < 0)
goto error_1;
if (ret < 0)
goto error_2;
- afs_socket = socket;
- afs_charge_preallocation(NULL);
+ net->socket = socket;
+ afs_charge_preallocation(&net->charge_preallocation_work);
_leave(" = 0");
return 0;
error_2:
sock_release(socket);
error_1:
- destroy_workqueue(afs_async_calls);
-error_0:
_leave(" = %d", ret);
return ret;
}
/*
* close the RxRPC socket AFS was using
*/
-void afs_close_socket(void)
+void afs_close_socket(struct afs_net *net)
{
_enter("");
- kernel_listen(afs_socket, 0);
+ kernel_listen(net->socket, 0);
flush_workqueue(afs_async_calls);
- if (afs_spare_incoming_call) {
- afs_put_call(afs_spare_incoming_call);
- afs_spare_incoming_call = NULL;
+ if (net->spare_incoming_call) {
+ afs_put_call(net->spare_incoming_call);
+ net->spare_incoming_call = NULL;
}
- _debug("outstanding %u", atomic_read(&afs_outstanding_calls));
- wait_on_atomic_t(&afs_outstanding_calls, atomic_t_wait,
+ _debug("outstanding %u", atomic_read(&net->nr_outstanding_calls));
+ wait_on_atomic_t(&net->nr_outstanding_calls, atomic_t_wait,
TASK_UNINTERRUPTIBLE);
_debug("no outstanding calls");
- kernel_sock_shutdown(afs_socket, SHUT_RDWR);
+ kernel_sock_shutdown(net->socket, SHUT_RDWR);
flush_workqueue(afs_async_calls);
- sock_release(afs_socket);
+ sock_release(net->socket);
_debug("dework");
- destroy_workqueue(afs_async_calls);
_leave("");
}
/*
* Allocate a call.
*/
-static struct afs_call *afs_alloc_call(const struct afs_call_type *type,
+static struct afs_call *afs_alloc_call(struct afs_net *net,
+ const struct afs_call_type *type,
gfp_t gfp)
{
struct afs_call *call;
return NULL;
call->type = type;
+ call->net = net;
atomic_set(&call->usage, 1);
INIT_WORK(&call->async_work, afs_process_async_call);
init_waitqueue_head(&call->waitq);
- o = atomic_inc_return(&afs_outstanding_calls);
+ o = atomic_inc_return(&net->nr_outstanding_calls);
trace_afs_call(call, afs_call_trace_alloc, 1, o,
__builtin_return_address(0));
return call;
*/
void afs_put_call(struct afs_call *call)
{
+ struct afs_net *net = call->net;
int n = atomic_dec_return(&call->usage);
- int o = atomic_read(&afs_outstanding_calls);
+ int o = atomic_read(&net->nr_outstanding_calls);
trace_afs_call(call, afs_call_trace_put, n + 1, o,
__builtin_return_address(0));
ASSERT(call->type->name != NULL);
if (call->rxcall) {
- rxrpc_kernel_end_call(afs_socket, call->rxcall);
+ rxrpc_kernel_end_call(net->socket, call->rxcall);
call->rxcall = NULL;
}
if (call->type->destructor)
kfree(call->request);
kfree(call);
- o = atomic_dec_return(&afs_outstanding_calls);
+ o = atomic_dec_return(&net->nr_outstanding_calls);
trace_afs_call(call, afs_call_trace_free, 0, o,
__builtin_return_address(0));
if (o == 0)
- wake_up_atomic_t(&afs_outstanding_calls);
+ wake_up_atomic_t(&net->nr_outstanding_calls);
}
}
int u = atomic_inc_return(&call->usage);
trace_afs_call(call, afs_call_trace_work, u,
- atomic_read(&afs_outstanding_calls),
+ atomic_read(&call->net->nr_outstanding_calls),
__builtin_return_address(0));
INIT_WORK(&call->work, call->type->work);
/*
* allocate a call with flat request and reply buffers
*/
-struct afs_call *afs_alloc_flat_call(const struct afs_call_type *type,
+struct afs_call *afs_alloc_flat_call(struct afs_net *net,
+ const struct afs_call_type *type,
size_t request_size, size_t reply_max)
{
struct afs_call *call;
- call = afs_alloc_call(type, GFP_NOFS);
+ call = afs_alloc_call(net, type, GFP_NOFS);
if (!call)
goto nomem_call;
bytes = msg->msg_iter.count;
nr = msg->msg_iter.nr_segs;
- ret = rxrpc_kernel_send_data(afs_socket, call->rxcall, msg,
+ ret = rxrpc_kernel_send_data(call->net->socket, call->rxcall, msg,
bytes, afs_notify_end_request_tx);
for (loop = 0; loop < nr; loop++)
put_page(bv[loop].bv_page);
_debug("____MAKE %p{%s,%x} [%d]____",
call, call->type->name, key_serial(call->key),
- atomic_read(&afs_outstanding_calls));
+ atomic_read(&call->net->nr_outstanding_calls));
call->async = async;
}
/* create a call */
- rxcall = rxrpc_kernel_begin_call(afs_socket, &srx, call->key,
+ rxcall = rxrpc_kernel_begin_call(call->net->socket, &srx, call->key,
(unsigned long)call,
tx_total_len, gfp,
(async ?
*/
if (!call->send_pages)
call->state = AFS_CALL_AWAIT_REPLY;
- ret = rxrpc_kernel_send_data(afs_socket, rxcall,
+ ret = rxrpc_kernel_send_data(call->net->socket, rxcall,
&msg, call->request_size,
afs_notify_end_request_tx);
if (ret < 0)
error_do_abort:
call->state = AFS_CALL_COMPLETE;
if (ret != -ECONNABORTED) {
- rxrpc_kernel_abort_call(afs_socket, rxcall, RX_USER_ABORT,
- ret, "KSD");
+ rxrpc_kernel_abort_call(call->net->socket, rxcall,
+ RX_USER_ABORT, ret, "KSD");
} else {
abort_code = 0;
offset = 0;
- rxrpc_kernel_recv_data(afs_socket, rxcall, NULL, 0, &offset,
- false, &abort_code, &call->service_id);
+ rxrpc_kernel_recv_data(call->net->socket, rxcall, NULL,
+ 0, &offset, false, &abort_code,
+ &call->service_id);
ret = call->type->abort_to_error(abort_code);
}
error_kill_call:
) {
if (call->state == AFS_CALL_AWAIT_ACK) {
size_t offset = 0;
- ret = rxrpc_kernel_recv_data(afs_socket, call->rxcall,
+ ret = rxrpc_kernel_recv_data(call->net->socket,
+ call->rxcall,
NULL, 0, &offset, false,
&call->abort_code,
&call->service_id);
goto call_complete;
case -ENOTCONN:
abort_code = RX_CALL_DEAD;
- rxrpc_kernel_abort_call(afs_socket, call->rxcall,
+ rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
abort_code, ret, "KNC");
goto save_error;
case -ENOTSUPP:
abort_code = RXGEN_OPCODE;
- rxrpc_kernel_abort_call(afs_socket, call->rxcall,
+ rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
abort_code, ret, "KIV");
goto save_error;
case -ENODATA:
abort_code = RXGEN_CC_UNMARSHAL;
if (call->state != AFS_CALL_AWAIT_REPLY)
abort_code = RXGEN_SS_UNMARSHAL;
- rxrpc_kernel_abort_call(afs_socket, call->rxcall,
+ rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
abort_code, -EBADMSG, "KUM");
goto save_error;
}
_enter("");
- rtt = rxrpc_kernel_get_rtt(afs_socket, call->rxcall);
+ rtt = rxrpc_kernel_get_rtt(call->net->socket, call->rxcall);
rtt2 = nsecs_to_jiffies64(rtt) * 2;
if (rtt2 < 2)
rtt2 = 2;
timeout = rtt2;
- last_life = rxrpc_kernel_check_life(afs_socket, call->rxcall);
+ last_life = rxrpc_kernel_check_life(call->net->socket, call->rxcall);
add_wait_queue(&call->waitq, &myself);
for (;;) {
if (call->state == AFS_CALL_COMPLETE)
break;
- life = rxrpc_kernel_check_life(afs_socket, call->rxcall);
+ life = rxrpc_kernel_check_life(call->net->socket, call->rxcall);
if (timeout == 0 &&
life == last_life && signal_pending(current))
break;
/* Kill off the call if it's still live. */
if (call->state < AFS_CALL_COMPLETE) {
_debug("call interrupted");
- rxrpc_kernel_abort_call(afs_socket, call->rxcall,
+ rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
RX_USER_ABORT, -EINTR, "KWI");
}
u = __atomic_add_unless(&call->usage, 1, 0);
if (u != 0) {
trace_afs_call(call, afs_call_trace_wake, u,
- atomic_read(&afs_outstanding_calls),
+ atomic_read(&call->net->nr_outstanding_calls),
__builtin_return_address(0));
if (!queue_work(afs_async_calls, &call->async_work))
/*
* Charge the incoming call preallocation.
*/
-static void afs_charge_preallocation(struct work_struct *work)
+void afs_charge_preallocation(struct work_struct *work)
{
- struct afs_call *call = afs_spare_incoming_call;
+ struct afs_net *net =
+ container_of(work, struct afs_net, charge_preallocation_work);
+ struct afs_call *call = net->spare_incoming_call;
for (;;) {
if (!call) {
- call = afs_alloc_call(&afs_RXCMxxxx, GFP_KERNEL);
+ call = afs_alloc_call(net, &afs_RXCMxxxx, GFP_KERNEL);
if (!call)
break;
init_waitqueue_head(&call->waitq);
}
- if (rxrpc_kernel_charge_accept(afs_socket,
+ if (rxrpc_kernel_charge_accept(net->socket,
afs_wake_up_async_call,
afs_rx_attach,
(unsigned long)call,
break;
call = NULL;
}
- afs_spare_incoming_call = call;
+ net->spare_incoming_call = call;
}
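/*
 * [Editorial note] Passing the work_struct instead of NULL lets
 * afs_charge_preallocation() recover its afs_net via container_of(),
 * so the same function serves both the synchronous call in
 * afs_open_socket() and the deferred runs queued from
 * afs_rx_new_call() below.
 */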
/*
static void afs_rx_new_call(struct sock *sk, struct rxrpc_call *rxcall,
unsigned long user_call_ID)
{
- queue_work(afs_wq, &afs_charge_preallocation_work);
+ struct afs_net *net = afs_sock2net(sk);
+
+ queue_work(afs_wq, &net->charge_preallocation_work);
}
/*
*/
void afs_send_empty_reply(struct afs_call *call)
{
+ struct afs_net *net = call->net;
struct msghdr msg;
_enter("");
- rxrpc_kernel_set_tx_length(afs_socket, call->rxcall, 0);
+ rxrpc_kernel_set_tx_length(net->socket, call->rxcall, 0);
msg.msg_name = NULL;
msg.msg_namelen = 0;
msg.msg_flags = 0;
call->state = AFS_CALL_AWAIT_ACK;
- switch (rxrpc_kernel_send_data(afs_socket, call->rxcall, &msg, 0,
+ switch (rxrpc_kernel_send_data(net->socket, call->rxcall, &msg, 0,
afs_notify_end_reply_tx)) {
case 0:
_leave(" [replied]");
case -ENOMEM:
_debug("oom");
- rxrpc_kernel_abort_call(afs_socket, call->rxcall,
+ rxrpc_kernel_abort_call(net->socket, call->rxcall,
RX_USER_ABORT, -ENOMEM, "KOO");
default:
_leave(" [error]");
*/
void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len)
{
+ struct afs_net *net = call->net;
struct msghdr msg;
struct kvec iov[1];
int n;
_enter("");
- rxrpc_kernel_set_tx_length(afs_socket, call->rxcall, len);
+ rxrpc_kernel_set_tx_length(net->socket, call->rxcall, len);
iov[0].iov_base = (void *) buf;
iov[0].iov_len = len;
msg.msg_flags = 0;
call->state = AFS_CALL_AWAIT_ACK;
- n = rxrpc_kernel_send_data(afs_socket, call->rxcall, &msg, len,
+ n = rxrpc_kernel_send_data(net->socket, call->rxcall, &msg, len,
afs_notify_end_reply_tx);
if (n >= 0) {
/* Success */
if (n == -ENOMEM) {
_debug("oom");
- rxrpc_kernel_abort_call(afs_socket, call->rxcall,
+ rxrpc_kernel_abort_call(net->socket, call->rxcall,
RX_USER_ABORT, -ENOMEM, "KOO");
}
_leave(" [error]");
int afs_extract_data(struct afs_call *call, void *buf, size_t count,
bool want_more)
{
+ struct afs_net *net = call->net;
int ret;
_enter("{%s,%zu},,%zu,%d",
ASSERTCMP(call->offset, <=, count);
- ret = rxrpc_kernel_recv_data(afs_socket, call->rxcall,
+ ret = rxrpc_kernel_recv_data(net->socket, call->rxcall,
buf, count, &call->offset,
want_more, &call->abort_code,
&call->service_id);
static unsigned afs_server_timeout = 10; /* server timeout in seconds */
-static void afs_reap_server(struct work_struct *);
-
-/* tree of all the servers, indexed by IP address */
-static struct rb_root afs_servers = RB_ROOT;
-static DEFINE_RWLOCK(afs_servers_lock);
-
-/* LRU list of all the servers not currently in use */
-static LIST_HEAD(afs_server_graveyard);
-static DEFINE_SPINLOCK(afs_server_graveyard_lock);
-static DECLARE_DELAYED_WORK(afs_server_reaper, afs_reap_server);
-
/*
* install a server record in the master tree
*/
static int afs_install_server(struct afs_server *server)
{
struct afs_server *xserver;
+ struct afs_net *net = server->cell->net;
struct rb_node **pp, *p;
int ret;
_enter("%p", server);
- write_lock(&afs_servers_lock);
+ write_lock(&net->servers_lock);
ret = -EEXIST;
- pp = &afs_servers.rb_node;
+ pp = &net->servers.rb_node;
p = NULL;
while (*pp) {
p = *pp;
}
rb_link_node(&server->master_rb, p, pp);
- rb_insert_color(&server->master_rb, &afs_servers);
+ rb_insert_color(&server->master_rb, &net->servers);
ret = 0;
error:
- write_unlock(&afs_servers_lock);
+ write_unlock(&net->servers_lock);
return ret;
}
read_unlock(&cell->servers_lock);
no_longer_unused:
if (!list_empty(&server->grave)) {
- spin_lock(&afs_server_graveyard_lock);
+ spin_lock(&cell->net->server_graveyard_lock);
list_del_init(&server->grave);
- spin_unlock(&afs_server_graveyard_lock);
+ spin_unlock(&cell->net->server_graveyard_lock);
}
_leave(" = %p{%d}", server, atomic_read(&server->usage));
return server;
/*
* look up a server by its IP address
*/
-struct afs_server *afs_find_server(const struct sockaddr_rxrpc *srx)
+struct afs_server *afs_find_server(struct afs_net *net,
+ const struct sockaddr_rxrpc *srx)
{
struct afs_server *server = NULL;
struct rb_node *p;
return NULL;
}
- read_lock(&afs_servers_lock);
+ read_lock(&net->servers_lock);
- p = afs_servers.rb_node;
+ p = net->servers.rb_node;
while (p) {
server = rb_entry(p, struct afs_server, master_rb);
server = NULL;
found:
- read_unlock(&afs_servers_lock);
+ read_unlock(&net->servers_lock);
ASSERTIFCMP(server, server->addr.s_addr, ==, addr.s_addr);
_leave(" = %p", server);
return server;
*/
void afs_put_server(struct afs_server *server)
{
+ struct afs_net *net = server->cell->net;
+
if (!server)
return;
afs_flush_callback_breaks(server);
- spin_lock(&afs_server_graveyard_lock);
+ spin_lock(&net->server_graveyard_lock);
if (atomic_read(&server->usage) == 0) {
- list_move_tail(&server->grave, &afs_server_graveyard);
+ list_move_tail(&server->grave, &net->server_graveyard);
server->time_of_death = ktime_get_real_seconds();
- queue_delayed_work(afs_wq, &afs_server_reaper,
- afs_server_timeout * HZ);
+ queue_delayed_work(afs_wq, &net->server_reaper,
+ net->live ? afs_server_timeout * HZ : 0);
}
- spin_unlock(&afs_server_graveyard_lock);
+ spin_unlock(&net->server_graveyard_lock);
_leave(" [dead]");
}
/*
* reap dead server records
*/
-static void afs_reap_server(struct work_struct *work)
+void afs_reap_server(struct work_struct *work)
{
LIST_HEAD(corpses);
struct afs_server *server;
+ struct afs_net *net = container_of(work, struct afs_net, server_reaper.work);
unsigned long delay, expiry;
time64_t now;
now = ktime_get_real_seconds();
- spin_lock(&afs_server_graveyard_lock);
+ spin_lock(&net->server_graveyard_lock);
- while (!list_empty(&afs_server_graveyard)) {
- server = list_entry(afs_server_graveyard.next,
+ while (!list_empty(&net->server_graveyard)) {
+ server = list_entry(net->server_graveyard.next,
struct afs_server, grave);
/* the queue is ordered most dead first */
- expiry = server->time_of_death + afs_server_timeout;
- if (expiry > now) {
- delay = (expiry - now) * HZ;
- mod_delayed_work(afs_wq, &afs_server_reaper, delay);
- break;
+ if (net->live) {
+ expiry = server->time_of_death + afs_server_timeout;
+ if (expiry > now) {
+ delay = (expiry - now) * HZ;
+ mod_delayed_work(afs_wq, &net->server_reaper, delay);
+ break;
+ }
}
write_lock(&server->cell->servers_lock);
- write_lock(&afs_servers_lock);
+ write_lock(&net->servers_lock);
if (atomic_read(&server->usage) > 0) {
list_del_init(&server->grave);
} else {
list_move_tail(&server->grave, &corpses);
list_del_init(&server->link);
- rb_erase(&server->master_rb, &afs_servers);
+ rb_erase(&server->master_rb, &net->servers);
}
- write_unlock(&afs_servers_lock);
+ write_unlock(&net->servers_lock);
write_unlock(&server->cell->servers_lock);
}
- spin_unlock(&afs_server_graveyard_lock);
+ spin_unlock(&net->server_graveyard_lock);
/* now reap the corpses we've extracted */
while (!list_empty(&corpses)) {
}
/*
- * discard all the server records for rmmod
+ * Discard all the server records from a net namespace when it is destroyed or
+ * the afs module is removed.
*/
-void __exit afs_purge_servers(void)
+void __net_exit afs_purge_servers(struct afs_net *net)
{
- afs_server_timeout = 0;
- mod_delayed_work(afs_wq, &afs_server_reaper, 0);
+ mod_delayed_work(afs_wq, &net->server_reaper, 0);
}
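/*
 * [Editorial note] Shutdown now keys off net->live rather than zeroing
 * afs_server_timeout: afs_net_exit() clears ->live before purging,
 * afs_put_server() queues the reaper with no delay once the namespace
 * is dead, and afs_reap_server() skips the expiry check when
 * !net->live, so the zero-delay kick above drains the graveyard.
 */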
#include <linux/statfs.h>
#include <linux/sched.h>
#include <linux/nsproxy.h>
+#include <linux/magic.h>
#include <net/net_namespace.h>
#include "internal.h"
-#define AFS_FS_MAGIC 0x6B414653 /* 'kAFS' */
-
static void afs_i_init_once(void *foo);
static struct dentry *afs_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data);
token = match_token(p, afs_options_list, args);
switch (token) {
case afs_opt_cell:
- cell = afs_cell_lookup(args[0].from,
+ cell = afs_cell_lookup(params->net,
+ args[0].from,
args[0].to - args[0].from,
false);
if (IS_ERR(cell))
/* lookup the cell record */
if (cellname || !params->cell) {
- cell = afs_cell_lookup(cellname, cellnamesz, true);
+ cell = afs_cell_lookup(params->net, cellname, cellnamesz, true);
if (IS_ERR(cell)) {
printk(KERN_ERR "kAFS: unable to lookup cell '%*.*s'\n",
cellnamesz, cellnamesz, cellname ?: "");
struct afs_super_info *as1 = data;
struct afs_super_info *as = sb->s_fs_info;
- return as->volume == as1->volume;
+ return as->net == as1->net && as->volume == as1->volume;
}
static int afs_set_super(struct super_block *sb, void *data)
_enter(",,%s,%p", dev_name, options);
memset(&params, 0, sizeof(params));
+ params.net = &__afs_net;
ret = -EINVAL;
if (current->nsproxy->net_ns != &init_net)
}
/* allocate a superblock info record */
+ ret = -ENOMEM;
as = kzalloc(sizeof(struct afs_super_info), GFP_KERNEL);
- if (!as) {
- ret = -ENOMEM;
- afs_put_volume(vol);
- goto error;
- }
+ if (!as)
+ goto error_vol;
+
+ as->net = afs_get_net(params.net);
as->volume = vol;
/* allocate a deviceless superblock */
sb = sget(fs_type, afs_test_super, afs_set_super, flags, as);
if (IS_ERR(sb)) {
ret = PTR_ERR(sb);
- afs_put_volume(vol);
- kfree(as);
- goto error;
+ goto error_as;
}
if (!sb->s_root) {
/* initial superblock/root creation */
_debug("create");
ret = afs_fill_super(sb, ¶ms);
- if (ret < 0) {
- deactivate_locked_super(sb);
- goto error;
- }
+ if (ret < 0)
+ goto error_sb;
sb->s_flags |= MS_ACTIVE;
} else {
_debug("reuse");
ASSERTCMP(sb->s_flags, &, MS_ACTIVE);
- afs_put_volume(vol);
+ afs_put_volume(params.net, vol);
kfree(as);
}
_leave(" = 0 [%p]", sb);
return dget(sb->s_root);
+error_sb:
+ deactivate_locked_super(sb);
+ goto error;
+error_as:
+ afs_put_net(as->net);
+ kfree(as);
+error_vol:
+ afs_put_volume(params.net, vol);
error:
afs_put_cell(params.cell);
key_put(params.key);
static void afs_kill_super(struct super_block *sb)
{
struct afs_super_info *as = sb->s_fs_info;
+ struct afs_net *net = as->net;
+
kill_anon_super(sb);
- afs_put_volume(as->volume);
+ afs_put_volume(net, as->volume);
kfree(as);
}
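
At this stage of the conversion only the single __afs_net exists, so the afs_get_net()/afs_put_net() pair wrapped around as->net is most plausibly a placeholder. A hedged sketch of what internal.h could provide until per-namespace lifetimes are real (the no-op behaviour is an assumption based on the single global instance):

	/* Hedged sketch: trivial reference helpers while there is only one
	 * global afs_net; these would gain real refcounting once afs_net
	 * instances are created and destroyed with network namespaces.
	 */
	static inline struct afs_net *afs_get_net(struct afs_net *net)
	{
		return net;
	}

	static inline void afs_put_net(struct afs_net *net)
	{
	}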
/*
* dispatch a get volume entry by name operation
*/
-int afs_vl_get_entry_by_name(struct in_addr *addr,
+int afs_vl_get_entry_by_name(struct afs_net *net,
+ struct in_addr *addr,
struct key *key,
const char *volname,
struct afs_cache_vlocation *entry,
padsz = (4 - (volnamesz & 3)) & 3;
reqsz = 8 + volnamesz + padsz;
- call = afs_alloc_flat_call(&afs_RXVLGetEntryByName, reqsz, 384);
+ call = afs_alloc_flat_call(net, &afs_RXVLGetEntryByName, reqsz, 384);
if (!call)
return -ENOMEM;
/*
* dispatch a get volume entry by ID operation
*/
-int afs_vl_get_entry_by_id(struct in_addr *addr,
+int afs_vl_get_entry_by_id(struct afs_net *net,
+ struct in_addr *addr,
struct key *key,
afs_volid_t volid,
afs_voltype_t voltype,
_enter("");
- call = afs_alloc_flat_call(&afs_RXVLGetEntryById, 12, 384);
+ call = afs_alloc_flat_call(net, &afs_RXVLGetEntryById, 12, 384);
if (!call)
return -ENOMEM;
#include <linux/sched.h>
#include "internal.h"
+struct workqueue_struct *afs_vlocation_update_worker;
+
static unsigned afs_vlocation_timeout = 10; /* volume location timeout in seconds */
static unsigned afs_vlocation_update_timeout = 10 * 60; /* update interval in seconds */
-static void afs_vlocation_reaper(struct work_struct *);
-static void afs_vlocation_updater(struct work_struct *);
-
-static LIST_HEAD(afs_vlocation_updates);
-static LIST_HEAD(afs_vlocation_graveyard);
-static DEFINE_SPINLOCK(afs_vlocation_updates_lock);
-static DEFINE_SPINLOCK(afs_vlocation_graveyard_lock);
-static DECLARE_DELAYED_WORK(afs_vlocation_reap, afs_vlocation_reaper);
-static DECLARE_DELAYED_WORK(afs_vlocation_update, afs_vlocation_updater);
-static struct workqueue_struct *afs_vlocation_update_worker;
-
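
DECLARE_DELAYED_WORK() and friends only work for file-scope statics, so the per-net replacements for the declarations removed above must be initialised at runtime when an afs_net is set up. A hedged sketch of that setup; afs_net_init() is an assumed name for wherever the series performs it:

	/* Hedged sketch: runtime initialisation replacing the removed
	 * statics; called once per afs_net before any mounts use it.
	 */
	int afs_net_init(struct afs_net *net)
	{
		net->live = true;
		INIT_LIST_HEAD(&net->vl_updates);
		INIT_LIST_HEAD(&net->vl_graveyard);
		spin_lock_init(&net->vl_updates_lock);
		spin_lock_init(&net->vl_graveyard_lock);
		INIT_DELAYED_WORK(&net->vl_updater, afs_vlocation_updater);
		INIT_DELAYED_WORK(&net->vl_reaper, afs_vlocation_reaper);
		return 0;
	}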
/*
* iterate through the VL servers in a cell until one of them admits knowing
* about the volume in question
_debug("CellServ[%hu]: %08x", cell->vl_curr_svix, addr.s_addr);
/* attempt to access the VL server */
- ret = afs_vl_get_entry_by_name(&addr, key, vl->vldb.name, vldb,
- false);
+ ret = afs_vl_get_entry_by_name(cell->net, &addr, key,
+ vl->vldb.name, vldb, false);
switch (ret) {
case 0:
goto out;
_debug("CellServ[%hu]: %08x", cell->vl_curr_svix, addr.s_addr);
/* attempt to access the VL server */
- ret = afs_vl_get_entry_by_id(&addr, key, volid, voltype, vldb,
- false);
+ ret = afs_vl_get_entry_by_id(cell->net, &addr, key, volid,
+ voltype, vldb, false);
switch (ret) {
case 0:
goto out;
/*
* queue a vlocation record for updates
*/
-static void afs_vlocation_queue_for_updates(struct afs_vlocation *vl)
+static void afs_vlocation_queue_for_updates(struct afs_net *net,
+ struct afs_vlocation *vl)
{
struct afs_vlocation *xvl;
vl->update_at = ktime_get_real_seconds() +
afs_vlocation_update_timeout;
- spin_lock(&afs_vlocation_updates_lock);
+ spin_lock(&net->vl_updates_lock);
- if (!list_empty(&afs_vlocation_updates)) {
+ if (!list_empty(&net->vl_updates)) {
/* ... but wait at least 1 second more than the newest record
* already queued so that we don't spam the VL server suddenly
* with lots of requests
*/
- xvl = list_entry(afs_vlocation_updates.prev,
+ xvl = list_entry(net->vl_updates.prev,
struct afs_vlocation, update);
if (vl->update_at <= xvl->update_at)
vl->update_at = xvl->update_at + 1;
- } else {
+ } else if (net->live) {
queue_delayed_work(afs_vlocation_update_worker,
- &afs_vlocation_update,
+ &net->vl_updater,
afs_vlocation_update_timeout * HZ);
}
- list_add_tail(&vl->update, &afs_vlocation_updates);
- spin_unlock(&afs_vlocation_updates_lock);
+ list_add_tail(&vl->update, &net->vl_updates);
+ spin_unlock(&net->vl_updates_lock);
}
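
To make the spacing rule concrete, a worked illustration with hypothetical times (afs_vlocation_update_timeout = 600s, now = 1000):

	/* Illustration only; values are hypothetical:
	 *   record A queued: update_at = 1000 + 600 = 1600  (list empty, updater armed)
	 *   record B queued: wants 1600, tail is at 1600 -> pushed to 1601
	 *   record C queued: wants 1600, tail is at 1601 -> pushed to 1602
	 * A burst of lookups therefore becomes a 1/sec trickle of VL updates.
	 */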
/*
* - lookup in the local cache if not able to find on the VL server
- * - insert/update in the local cache if did get a VL response
+ * - insert/update in the local cache if we did get a VL response
*/
-struct afs_vlocation *afs_vlocation_lookup(struct afs_cell *cell,
+struct afs_vlocation *afs_vlocation_lookup(struct afs_net *net,
+ struct afs_cell *cell,
struct key *key,
const char *name,
size_t namesz)
#endif
/* schedule for regular updates */
- afs_vlocation_queue_for_updates(vl);
+ afs_vlocation_queue_for_updates(net, vl);
goto success;
found_in_memory:
atomic_inc(&vl->usage);
spin_unlock(&cell->vl_lock);
if (!list_empty(&vl->grave)) {
- spin_lock(&afs_vlocation_graveyard_lock);
+ spin_lock(&net->vl_graveyard_lock);
list_del_init(&vl->grave);
- spin_unlock(&afs_vlocation_graveyard_lock);
+ spin_unlock(&net->vl_graveyard_lock);
}
up_write(&cell->vl_sem);
wake_up(&vl->waitq);
error:
ASSERT(vl != NULL);
- afs_put_vlocation(vl);
+ afs_put_vlocation(net, vl);
_leave(" = %d", ret);
return ERR_PTR(ret);
}
/*
* finish using a volume location record
*/
-void afs_put_vlocation(struct afs_vlocation *vl)
+void afs_put_vlocation(struct afs_net *net, struct afs_vlocation *vl)
{
if (!vl)
return;
return;
}
- spin_lock(&afs_vlocation_graveyard_lock);
+ spin_lock(&net->vl_graveyard_lock);
if (atomic_read(&vl->usage) == 0) {
_debug("buried");
- list_move_tail(&vl->grave, &afs_vlocation_graveyard);
+ list_move_tail(&vl->grave, &net->vl_graveyard);
vl->time_of_death = ktime_get_real_seconds();
- queue_delayed_work(afs_wq, &afs_vlocation_reap,
+ queue_delayed_work(afs_wq, &net->vl_reaper,
afs_vlocation_timeout * HZ);
/* suspend updates on this record */
if (!list_empty(&vl->update)) {
- spin_lock(&afs_vlocation_updates_lock);
+ spin_lock(&net->vl_updates_lock);
list_del_init(&vl->update);
- spin_unlock(&afs_vlocation_updates_lock);
+ spin_unlock(&net->vl_updates_lock);
}
}
- spin_unlock(&afs_vlocation_graveyard_lock);
+ spin_unlock(&net->vl_graveyard_lock);
_leave(" [killed?]");
}
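
The graveyard gives a dying record a grace period during which a fresh lookup can resurrect it (the list_del_init() of vl->grave in afs_vlocation_lookup() above) instead of refetching from the VL server. A hedged sketch of the caller-side contract; the caller shown is illustrative, not lifted from the series:

	/* Hedged usage sketch: every successful lookup is balanced by a put
	 * against the same net; the put may merely park the record in
	 * net->vl_graveyard and arm net->vl_reaper rather than free it.
	 */
	struct afs_vlocation *vl;

	vl = afs_vlocation_lookup(net, cell, key, volname, volnamesz);
	if (IS_ERR(vl))
		return PTR_ERR(vl);

	/* ... consult vl->vldb ... */

	afs_put_vlocation(net, vl);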
/*
* reap dead volume location records
*/
-static void afs_vlocation_reaper(struct work_struct *work)
+void afs_vlocation_reaper(struct work_struct *work)
{
LIST_HEAD(corpses);
struct afs_vlocation *vl;
+ struct afs_net *net = container_of(work, struct afs_net, vl_reaper.work);
unsigned long delay, expiry;
time64_t now;
_enter("");
now = ktime_get_real_seconds();
- spin_lock(&afs_vlocation_graveyard_lock);
+ spin_lock(&net->vl_graveyard_lock);
- while (!list_empty(&afs_vlocation_graveyard)) {
- vl = list_entry(afs_vlocation_graveyard.next,
+ while (!list_empty(&net->vl_graveyard)) {
+ vl = list_entry(net->vl_graveyard.next,
struct afs_vlocation, grave);
_debug("check %p", vl);
/* the queue is ordered most dead first */
- expiry = vl->time_of_death + afs_vlocation_timeout;
- if (expiry > now) {
- delay = (expiry - now) * HZ;
- _debug("delay %lu", delay);
- mod_delayed_work(afs_wq, &afs_vlocation_reap, delay);
- break;
+ if (net->live) {
+ expiry = vl->time_of_death + afs_vlocation_timeout;
+ if (expiry > now) {
+ delay = (expiry - now) * HZ;
+ _debug("delay %lu", delay);
+ mod_delayed_work(afs_wq, &net->vl_reaper, delay);
+ break;
+ }
}
spin_lock(&vl->cell->vl_lock);
spin_unlock(&vl->cell->vl_lock);
}
- spin_unlock(&afs_vlocation_graveyard_lock);
+ spin_unlock(&net->vl_graveyard_lock);
/* now reap the corpses we've extracted */
while (!list_empty(&corpses)) {
}
/*
- * initialise the VL update process
- */
-int __init afs_vlocation_update_init(void)
-{
- afs_vlocation_update_worker = alloc_workqueue("kafs_vlupdated",
- WQ_MEM_RECLAIM, 0);
- return afs_vlocation_update_worker ? 0 : -ENOMEM;
-}
-
-/*
- * discard all the volume location records for rmmod
+ * Discard all the volume location records from a net namespace when it is
+ * destroyed or the afs module is removed.
*/
-void afs_vlocation_purge(void)
+void __net_exit afs_vlocation_purge(struct afs_net *net)
{
- afs_vlocation_timeout = 0;
-
- spin_lock(&afs_vlocation_updates_lock);
- list_del_init(&afs_vlocation_updates);
- spin_unlock(&afs_vlocation_updates_lock);
- mod_delayed_work(afs_vlocation_update_worker, &afs_vlocation_update, 0);
- destroy_workqueue(afs_vlocation_update_worker);
-
- mod_delayed_work(afs_wq, &afs_vlocation_reap, 0);
+ spin_lock(&net->vl_updates_lock);
+ list_del_init(&net->vl_updates);
+ spin_unlock(&net->vl_updates_lock);
+ mod_delayed_work(afs_vlocation_update_worker, &net->vl_updater, 0);
+ mod_delayed_work(afs_wq, &net->vl_reaper, 0);
}
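
The __net_exit annotation anticipates these purge functions being driven from network-namespace teardown, but this patch still points every mount at the single __afs_net and registers nothing with the netns core. A hedged sketch of how the eventual wiring would conventionally look (afs_net_id and the hook names are assumptions, not part of this patch):

	/* Hedged sketch of eventual pernet wiring; not registered by this
	 * patch.  net_generic() storage is sized and indexed by the core.
	 */
	static unsigned int afs_net_id;

	static int __net_init afs_pernet_init(struct net *net_ns)
	{
		return afs_net_init(net_generic(net_ns, afs_net_id));
	}

	static void __net_exit afs_pernet_exit(struct net *net_ns)
	{
		struct afs_net *net = net_generic(net_ns, afs_net_id);

		net->live = false;		/* stop workers rearming themselves */
		afs_purge_servers(net);
		afs_vlocation_purge(net);
	}

	static struct pernet_operations afs_net_ops = {
		.init	= afs_pernet_init,
		.exit	= afs_pernet_exit,
		.id	= &afs_net_id,
		.size	= sizeof(struct afs_net),
	};

	/* registered at module init with register_pernet_subsys(&afs_net_ops) */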
/*
* update a volume location
*/
-static void afs_vlocation_updater(struct work_struct *work)
+void afs_vlocation_updater(struct work_struct *work)
{
struct afs_cache_vlocation vldb;
struct afs_vlocation *vl, *xvl;
+ struct afs_net *net = container_of(work, struct afs_net, vl_updater.work);
time64_t now;
long timeout;
int ret;
+ if (!net->live)
+ return;
+
_enter("");
now = ktime_get_real_seconds();
/* find a record to update */
- spin_lock(&afs_vlocation_updates_lock);
+ spin_lock(&net->vl_updates_lock);
for (;;) {
- if (list_empty(&afs_vlocation_updates)) {
- spin_unlock(&afs_vlocation_updates_lock);
+ if (list_empty(&net->vl_updates) || !net->live) {
+ spin_unlock(&net->vl_updates_lock);
_leave(" [nothing]");
return;
}
- vl = list_entry(afs_vlocation_updates.next,
+ vl = list_entry(net->vl_updates.next,
struct afs_vlocation, update);
if (atomic_read(&vl->usage) > 0)
break;
timeout = vl->update_at - now;
if (timeout > 0) {
queue_delayed_work(afs_vlocation_update_worker,
- &afs_vlocation_update, timeout * HZ);
- spin_unlock(&afs_vlocation_updates_lock);
+ &net->vl_updater, timeout * HZ);
+ spin_unlock(&net->vl_updates_lock);
_leave(" [nothing]");
return;
}
list_del_init(&vl->update);
atomic_inc(&vl->usage);
- spin_unlock(&afs_vlocation_updates_lock);
+ spin_unlock(&net->vl_updates_lock);
/* we can now perform the update */
_debug("update %s", vl->vldb.name);
vl->update_at = ktime_get_real_seconds() +
afs_vlocation_update_timeout;
- spin_lock(&afs_vlocation_updates_lock);
+ spin_lock(&net->vl_updates_lock);
- if (!list_empty(&afs_vlocation_updates)) {
+ if (!list_empty(&net->vl_updates)) {
/* next update in 10 minutes, but wait at least 1 second more
* than the newest record already queued so that we don't spam
* the VL server suddenly with lots of requests
*/
- xvl = list_entry(afs_vlocation_updates.prev,
+ xvl = list_entry(net->vl_updates.prev,
struct afs_vlocation, update);
if (vl->update_at <= xvl->update_at)
vl->update_at = xvl->update_at + 1;
- xvl = list_entry(afs_vlocation_updates.next,
+ xvl = list_entry(net->vl_updates.next,
struct afs_vlocation, update);
timeout = xvl->update_at - now;
if (timeout < 0)
ASSERT(list_empty(&vl->update));
- list_add_tail(&vl->update, &afs_vlocation_updates);
+ list_add_tail(&vl->update, &net->vl_updates);
_debug("timeout %ld", timeout);
- queue_delayed_work(afs_vlocation_update_worker,
- &afs_vlocation_update, timeout * HZ);
- spin_unlock(&afs_vlocation_updates_lock);
- afs_put_vlocation(vl);
+ queue_delayed_work(afs_vlocation_update_worker,
+ &net->vl_updater, timeout * HZ);
+ spin_unlock(&net->vl_updates_lock);
+ afs_put_vlocation(net, vl);
}
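
Both workers above recover their afs_net with container_of() through the embedded work member, because the workqueue core hands the callback the inner work_struct of the delayed_work, not the delayed_work itself; hence the two-level .work path. A minimal illustration of the pattern (the function shown is an example, not from the series):

	static void example_worker(struct work_struct *work)
	{
		/* step from the embedded work_struct back to the afs_net */
		struct afs_net *net =
			container_of(work, struct afs_net, vl_updater.work);

		if (!net->live)		/* same liveness gate as the updater */
			return;
	}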
params->volnamesz, params->volnamesz, params->volname, params->rwpath);
/* lookup the volume location record */
- vlocation = afs_vlocation_lookup(params->cell, params->key,
+ vlocation = afs_vlocation_lookup(params->net, params->cell, params->key,
params->volname, params->volnamesz);
if (IS_ERR(vlocation)) {
ret = PTR_ERR(vlocation);
_debug("kAFS selected %s volume %08x",
afs_voltypes[volume->type], volume->vid);
up_write(&params->cell->vl_sem);
- afs_put_vlocation(vlocation);
+ afs_put_vlocation(params->net, vlocation);
_leave(" = %p", volume);
return volume;
error_up:
up_write(&params->cell->vl_sem);
error:
- afs_put_vlocation(vlocation);
+ afs_put_vlocation(params->net, vlocation);
_leave(" = %d", ret);
return ERR_PTR(ret);
/*
* destroy a volume record
*/
-void afs_put_volume(struct afs_volume *volume)
+void afs_put_volume(struct afs_net *net, struct afs_volume *volume)
{
struct afs_vlocation *vlocation;
int loop;
#ifdef CONFIG_AFS_FSCACHE
fscache_relinquish_cookie(volume->cache, 0);
#endif
- afs_put_vlocation(vlocation);
+ afs_put_vlocation(net, vlocation);
for (loop = volume->nservers - 1; loop >= 0; loop--)
afs_put_server(volume->servers[loop]);
#define OPENPROM_SUPER_MAGIC 0x9fa1
#define QNX4_SUPER_MAGIC 0x002f /* qnx4 fs detection */
#define QNX6_SUPER_MAGIC 0x68191122 /* qnx6 fs detection */
+#define AFS_FS_MAGIC 0x6B414653 /* 'kAFS' */
#define REISERFS_SUPER_MAGIC 0x52654973 /* used by gcc */
/* used by file system utilities that