staging: pohmelfs: remove drivers/staging/pohmelfs

The new POHMELFS is coming, and it is time to remove the deadly old design:
https://lkml.org/lkml/2012/2/8/293

Signed-off-by: Evgeniy Polyakov <zbr@ioremap.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Authored by Evgeniy Polyakov on 2012-02-09 02:44:50 +03:00; committed by Greg Kroah-Hartman
parent 203209ef77
commit 6743531986
14 changed files with 0 additions and 7979 deletions


@@ -60,8 +60,6 @@ source "drivers/staging/rts5139/Kconfig"
source "drivers/staging/frontier/Kconfig"
source "drivers/staging/pohmelfs/Kconfig"
source "drivers/staging/phison/Kconfig"
source "drivers/staging/line6/Kconfig"


@@ -22,7 +22,6 @@ obj-$(CONFIG_R8712U) += rtl8712/
obj-$(CONFIG_RTS_PSTOR) += rts_pstor/
obj-$(CONFIG_RTS5139) += rts5139/
obj-$(CONFIG_TRANZPORT) += frontier/
obj-$(CONFIG_POHMELFS) += pohmelfs/
obj-$(CONFIG_IDE_PHISON) += phison/
obj-$(CONFIG_LINE6_USB) += line6/
obj-$(CONFIG_USB_SERIAL_QUATECH2) += serqt_usb2/


@@ -1,20 +0,0 @@
config POHMELFS
tristate "POHMELFS filesystem support"
depends on NET
select CONNECTOR
select CRYPTO
select CRYPTO_BLKCIPHER
select CRYPTO_HMAC
help
POHMELFS stands for Parallel Optimized Host Message Exchange Layered
File System. This is a network filesystem which supports coherent
caching of data and metadata on clients.
config POHMELFS_DEBUG
bool "POHMELFS debugging"
depends on POHMELFS
default n
help
Turns on excessive POHMELFS debugging facilities.
You usually do not want this, since it noticeably slows things down and
floods syslog with kernel messages.


@@ -1,3 +0,0 @@
obj-$(CONFIG_POHMELFS) += pohmelfs.o
pohmelfs-y := inode.o config.o dir.o net.o path_entry.o trans.o crypto.o lock.o mcache.o


@@ -1,611 +0,0 @@
/*
* 2007+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/connector.h>
#include <linux/crypto.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/slab.h>
#include "netfs.h"
/*
* Global configuration list.
* Each client can be asked to get one of them.
*
* Allows to provide remote server address (ipv4/v6/whatever), port
* and so on via kernel connector.
*/
static struct cb_id pohmelfs_cn_id = {.idx = POHMELFS_CN_IDX, .val = POHMELFS_CN_VAL};
static LIST_HEAD(pohmelfs_config_list);
static DEFINE_MUTEX(pohmelfs_config_lock);
static inline int pohmelfs_config_eql(struct pohmelfs_ctl *sc, struct pohmelfs_ctl *ctl)
{
if (sc->idx == ctl->idx && sc->type == ctl->type &&
sc->proto == ctl->proto &&
sc->addrlen == ctl->addrlen &&
!memcmp(&sc->addr, &ctl->addr, ctl->addrlen))
return 1;
return 0;
}
static struct pohmelfs_config_group *pohmelfs_find_config_group(unsigned int idx)
{
struct pohmelfs_config_group *g, *group = NULL;
list_for_each_entry(g, &pohmelfs_config_list, group_entry) {
if (g->idx == idx) {
group = g;
break;
}
}
return group;
}
static struct pohmelfs_config_group *pohmelfs_find_create_config_group(unsigned int idx)
{
struct pohmelfs_config_group *g;
g = pohmelfs_find_config_group(idx);
if (g)
return g;
g = kzalloc(sizeof(struct pohmelfs_config_group), GFP_KERNEL);
if (!g)
return NULL;
INIT_LIST_HEAD(&g->config_list);
g->idx = idx;
g->num_entry = 0;
list_add_tail(&g->group_entry, &pohmelfs_config_list);
return g;
}
static inline void pohmelfs_insert_config_entry(struct pohmelfs_sb *psb, struct pohmelfs_config *dst)
{
struct pohmelfs_config *tmp;
INIT_LIST_HEAD(&dst->config_entry);
list_for_each_entry(tmp, &psb->state_list, config_entry) {
if (dst->state.ctl.prio > tmp->state.ctl.prio)
list_add_tail(&dst->config_entry, &tmp->config_entry);
}
if (list_empty(&dst->config_entry))
list_add_tail(&dst->config_entry, &psb->state_list);
}
static int pohmelfs_move_config_entry(struct pohmelfs_sb *psb,
struct pohmelfs_config *dst, struct pohmelfs_config *new)
{
if ((dst->state.ctl.prio == new->state.ctl.prio) &&
(dst->state.ctl.perm == new->state.ctl.perm))
return 0;
dprintk("%s: dst: prio: %d, perm: %x, new: prio: %d, perm: %d.\n",
__func__, dst->state.ctl.prio, dst->state.ctl.perm,
new->state.ctl.prio, new->state.ctl.perm);
dst->state.ctl.prio = new->state.ctl.prio;
dst->state.ctl.perm = new->state.ctl.perm;
list_del_init(&dst->config_entry);
pohmelfs_insert_config_entry(psb, dst);
return 0;
}
/*
* pohmelfs_copy_config() is used to copy new state configs from the
* config group (controlled by the netlink messages) into the superblock.
* This happens either at startup time where no transactions can access
* the list of the configs (and thus list of the network states), or at
* run-time, where it is protected by the psb->state_lock.
*/
int pohmelfs_copy_config(struct pohmelfs_sb *psb)
{
struct pohmelfs_config_group *g;
struct pohmelfs_config *c, *dst;
int err = -ENODEV;
mutex_lock(&pohmelfs_config_lock);
g = pohmelfs_find_config_group(psb->idx);
if (!g)
goto out_unlock;
/*
* Run over all entries in given config group and try to create and
* initialize those, which do not exist in superblock list.
* Skip all existing entries.
*/
list_for_each_entry(c, &g->config_list, config_entry) {
err = 0;
list_for_each_entry(dst, &psb->state_list, config_entry) {
if (pohmelfs_config_eql(&dst->state.ctl, &c->state.ctl)) {
err = pohmelfs_move_config_entry(psb, dst, c);
if (!err)
err = -EEXIST;
break;
}
}
if (err)
continue;
dst = kzalloc(sizeof(struct pohmelfs_config), GFP_KERNEL);
if (!dst) {
err = -ENOMEM;
break;
}
memcpy(&dst->state.ctl, &c->state.ctl, sizeof(struct pohmelfs_ctl));
pohmelfs_insert_config_entry(psb, dst);
err = pohmelfs_state_init_one(psb, dst);
if (err) {
list_del(&dst->config_entry);
kfree(dst);
}
err = 0;
}
out_unlock:
mutex_unlock(&pohmelfs_config_lock);
return err;
}
int pohmelfs_copy_crypto(struct pohmelfs_sb *psb)
{
struct pohmelfs_config_group *g;
int err = -ENOENT;
mutex_lock(&pohmelfs_config_lock);
g = pohmelfs_find_config_group(psb->idx);
if (!g)
goto err_out_exit;
if (g->hash_string) {
err = -ENOMEM;
psb->hash_string = kstrdup(g->hash_string, GFP_KERNEL);
if (!psb->hash_string)
goto err_out_exit;
psb->hash_strlen = g->hash_strlen;
}
if (g->cipher_string) {
psb->cipher_string = kstrdup(g->cipher_string, GFP_KERNEL);
if (!psb->cipher_string)
goto err_out_free_hash_string;
psb->cipher_strlen = g->cipher_strlen;
}
if (g->hash_keysize) {
psb->hash_key = kmemdup(g->hash_key, g->hash_keysize,
GFP_KERNEL);
if (!psb->hash_key)
goto err_out_free_cipher_string;
psb->hash_keysize = g->hash_keysize;
}
if (g->cipher_keysize) {
psb->cipher_key = kmemdup(g->cipher_key, g->cipher_keysize,
GFP_KERNEL);
if (!psb->cipher_key)
goto err_out_free_hash;
psb->cipher_keysize = g->cipher_keysize;
}
mutex_unlock(&pohmelfs_config_lock);
return 0;
err_out_free_hash:
kfree(psb->hash_key);
err_out_free_cipher_string:
kfree(psb->cipher_string);
err_out_free_hash_string:
kfree(psb->hash_string);
err_out_exit:
mutex_unlock(&pohmelfs_config_lock);
return err;
}
static int pohmelfs_send_reply(int err, int msg_num, int action, struct cn_msg *msg, struct pohmelfs_ctl *ctl)
{
struct pohmelfs_cn_ack *ack;
ack = kzalloc(sizeof(struct pohmelfs_cn_ack), GFP_KERNEL);
if (!ack)
return -ENOMEM;
memcpy(&ack->msg, msg, sizeof(struct cn_msg));
if (action == POHMELFS_CTLINFO_ACK)
memcpy(&ack->ctl, ctl, sizeof(struct pohmelfs_ctl));
ack->msg.len = sizeof(struct pohmelfs_cn_ack) - sizeof(struct cn_msg);
ack->msg.ack = msg->ack + 1;
ack->error = err;
ack->msg_num = msg_num;
cn_netlink_send(&ack->msg, 0, GFP_KERNEL);
kfree(ack);
return 0;
}
static int pohmelfs_cn_disp(struct cn_msg *msg)
{
struct pohmelfs_config_group *g;
struct pohmelfs_ctl *ctl = (struct pohmelfs_ctl *)msg->data;
struct pohmelfs_config *c, *tmp;
int err = 0, i = 1;
if (msg->len != sizeof(struct pohmelfs_ctl))
return -EBADMSG;
mutex_lock(&pohmelfs_config_lock);
g = pohmelfs_find_config_group(ctl->idx);
if (!g) {
pohmelfs_send_reply(err, 0, POHMELFS_NOINFO_ACK, msg, NULL);
goto out_unlock;
}
list_for_each_entry_safe(c, tmp, &g->config_list, config_entry) {
struct pohmelfs_ctl *sc = &c->state.ctl;
if (pohmelfs_send_reply(err, g->num_entry - i, POHMELFS_CTLINFO_ACK, msg, sc)) {
err = -ENOMEM;
goto out_unlock;
}
i += 1;
}
out_unlock:
mutex_unlock(&pohmelfs_config_lock);
return err;
}
static int pohmelfs_cn_dump(struct cn_msg *msg)
{
struct pohmelfs_config_group *g;
struct pohmelfs_config *c, *tmp;
int err = 0, i = 1;
int total_msg = 0;
if (msg->len != sizeof(struct pohmelfs_ctl))
return -EBADMSG;
mutex_lock(&pohmelfs_config_lock);
list_for_each_entry(g, &pohmelfs_config_list, group_entry)
total_msg += g->num_entry;
if (total_msg == 0) {
if (pohmelfs_send_reply(err, 0, POHMELFS_NOINFO_ACK, msg, NULL))
err = -ENOMEM;
goto out_unlock;
}
list_for_each_entry(g, &pohmelfs_config_list, group_entry) {
list_for_each_entry_safe(c, tmp, &g->config_list,
config_entry) {
struct pohmelfs_ctl *sc = &c->state.ctl;
if (pohmelfs_send_reply(err, total_msg - i,
POHMELFS_CTLINFO_ACK, msg,
sc)) {
err = -ENOMEM;
goto out_unlock;
}
i += 1;
}
}
out_unlock:
mutex_unlock(&pohmelfs_config_lock);
return err;
}
static int pohmelfs_cn_flush(struct cn_msg *msg)
{
struct pohmelfs_config_group *g;
struct pohmelfs_ctl *ctl = (struct pohmelfs_ctl *)msg->data;
struct pohmelfs_config *c, *tmp;
int err = 0;
if (msg->len != sizeof(struct pohmelfs_ctl))
return -EBADMSG;
mutex_lock(&pohmelfs_config_lock);
if (ctl->idx != POHMELFS_NULL_IDX) {
g = pohmelfs_find_config_group(ctl->idx);
if (!g)
goto out_unlock;
list_for_each_entry_safe(c, tmp, &g->config_list, config_entry) {
list_del(&c->config_entry);
g->num_entry--;
kfree(c);
}
} else {
list_for_each_entry(g, &pohmelfs_config_list, group_entry) {
list_for_each_entry_safe(c, tmp, &g->config_list,
config_entry) {
list_del(&c->config_entry);
g->num_entry--;
kfree(c);
}
}
}
out_unlock:
mutex_unlock(&pohmelfs_config_lock);
pohmelfs_cn_dump(msg);
return err;
}
static int pohmelfs_modify_config(struct pohmelfs_ctl *old, struct pohmelfs_ctl *new)
{
old->perm = new->perm;
old->prio = new->prio;
return 0;
}
static int pohmelfs_cn_ctl(struct cn_msg *msg, int action)
{
struct pohmelfs_config_group *g;
struct pohmelfs_ctl *ctl = (struct pohmelfs_ctl *)msg->data;
struct pohmelfs_config *c, *tmp;
int err = 0;
if (msg->len != sizeof(struct pohmelfs_ctl))
return -EBADMSG;
mutex_lock(&pohmelfs_config_lock);
g = pohmelfs_find_create_config_group(ctl->idx);
if (!g) {
err = -ENOMEM;
goto out_unlock;
}
list_for_each_entry_safe(c, tmp, &g->config_list, config_entry) {
struct pohmelfs_ctl *sc = &c->state.ctl;
if (pohmelfs_config_eql(sc, ctl)) {
if (action == POHMELFS_FLAGS_ADD) {
err = -EEXIST;
goto out_unlock;
} else if (action == POHMELFS_FLAGS_DEL) {
list_del(&c->config_entry);
g->num_entry--;
kfree(c);
goto out_unlock;
} else if (action == POHMELFS_FLAGS_MODIFY) {
err = pohmelfs_modify_config(sc, ctl);
goto out_unlock;
} else {
err = -EEXIST;
goto out_unlock;
}
}
}
if (action == POHMELFS_FLAGS_DEL) {
err = -EBADMSG;
goto out_unlock;
}
c = kzalloc(sizeof(struct pohmelfs_config), GFP_KERNEL);
if (!c) {
err = -ENOMEM;
goto out_unlock;
}
memcpy(&c->state.ctl, ctl, sizeof(struct pohmelfs_ctl));
g->num_entry++;
list_add_tail(&c->config_entry, &g->config_list);
out_unlock:
mutex_unlock(&pohmelfs_config_lock);
if (pohmelfs_send_reply(err, 0, POHMELFS_NOINFO_ACK, msg, NULL))
err = -ENOMEM;
return err;
}
static int pohmelfs_crypto_hash_init(struct pohmelfs_config_group *g, struct pohmelfs_crypto *c)
{
char *algo = (char *)c->data;
u8 *key = (u8 *)(algo + c->strlen);
if (g->hash_string)
return -EEXIST;
g->hash_string = kstrdup(algo, GFP_KERNEL);
if (!g->hash_string)
return -ENOMEM;
g->hash_strlen = c->strlen;
g->hash_keysize = c->keysize;
g->hash_key = kmemdup(key, c->keysize, GFP_KERNEL);
if (!g->hash_key) {
kfree(g->hash_string);
return -ENOMEM;
}
return 0;
}
static int pohmelfs_crypto_cipher_init(struct pohmelfs_config_group *g, struct pohmelfs_crypto *c)
{
char *algo = (char *)c->data;
u8 *key = (u8 *)(algo + c->strlen);
if (g->cipher_string)
return -EEXIST;
g->cipher_string = kstrdup(algo, GFP_KERNEL);
if (!g->cipher_string)
return -ENOMEM;
g->cipher_strlen = c->strlen;
g->cipher_keysize = c->keysize;
g->cipher_key = kmemdup(key, c->keysize, GFP_KERNEL);
if (!g->cipher_key) {
kfree(g->cipher_string);
return -ENOMEM;
}
return 0;
}
static int pohmelfs_cn_crypto(struct cn_msg *msg)
{
struct pohmelfs_crypto *crypto = (struct pohmelfs_crypto *)msg->data;
struct pohmelfs_config_group *g;
int err = 0;
dprintk("%s: idx: %u, strlen: %u, type: %u, keysize: %u, algo: %s.\n",
__func__, crypto->idx, crypto->strlen, crypto->type,
crypto->keysize, (char *)crypto->data);
mutex_lock(&pohmelfs_config_lock);
g = pohmelfs_find_create_config_group(crypto->idx);
if (!g) {
err = -ENOMEM;
goto out_unlock;
}
switch (crypto->type) {
case POHMELFS_CRYPTO_HASH:
err = pohmelfs_crypto_hash_init(g, crypto);
break;
case POHMELFS_CRYPTO_CIPHER:
err = pohmelfs_crypto_cipher_init(g, crypto);
break;
default:
err = -ENOTSUPP;
break;
}
out_unlock:
mutex_unlock(&pohmelfs_config_lock);
if (pohmelfs_send_reply(err, 0, POHMELFS_NOINFO_ACK, msg, NULL))
err = -ENOMEM;
return err;
}
static void pohmelfs_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
{
int err;
if (!cap_raised(current_cap(), CAP_SYS_ADMIN))
return;
switch (msg->flags) {
case POHMELFS_FLAGS_ADD:
case POHMELFS_FLAGS_DEL:
case POHMELFS_FLAGS_MODIFY:
err = pohmelfs_cn_ctl(msg, msg->flags);
break;
case POHMELFS_FLAGS_FLUSH:
err = pohmelfs_cn_flush(msg);
break;
case POHMELFS_FLAGS_SHOW:
err = pohmelfs_cn_disp(msg);
break;
case POHMELFS_FLAGS_DUMP:
err = pohmelfs_cn_dump(msg);
break;
case POHMELFS_FLAGS_CRYPTO:
err = pohmelfs_cn_crypto(msg);
break;
default:
err = -ENOSYS;
break;
}
}
int pohmelfs_config_check(struct pohmelfs_config *config, int idx)
{
struct pohmelfs_ctl *ctl = &config->state.ctl;
struct pohmelfs_config *tmp;
int err = -ENOENT;
struct pohmelfs_ctl *sc;
struct pohmelfs_config_group *g;
mutex_lock(&pohmelfs_config_lock);
g = pohmelfs_find_config_group(ctl->idx);
if (g) {
list_for_each_entry(tmp, &g->config_list, config_entry) {
sc = &tmp->state.ctl;
if (pohmelfs_config_eql(sc, ctl)) {
err = 0;
break;
}
}
}
mutex_unlock(&pohmelfs_config_lock);
return err;
}
int __init pohmelfs_config_init(void)
{
/* XXX remove (void *) cast when vanilla connector got synced */
return cn_add_callback(&pohmelfs_cn_id, "pohmelfs", (void *)pohmelfs_cn_callback);
}
void pohmelfs_config_exit(void)
{
struct pohmelfs_config *c, *tmp;
struct pohmelfs_config_group *g, *gtmp;
cn_del_callback(&pohmelfs_cn_id);
mutex_lock(&pohmelfs_config_lock);
list_for_each_entry_safe(g, gtmp, &pohmelfs_config_list, group_entry) {
list_for_each_entry_safe(c, tmp, &g->config_list, config_entry) {
list_del(&c->config_entry);
kfree(c);
}
list_del(&g->group_entry);
kfree(g->hash_string);
kfree(g->cipher_string);
kfree(g);
}
mutex_unlock(&pohmelfs_config_lock);
}
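The configuration code above implements the kernel side of the control channel: pohmelfs_cn_callback() is registered for connector id {POHMELFS_CN_IDX, POHMELFS_CN_VAL} and dispatches ADD/DEL/MODIFY/FLUSH/SHOW/DUMP/CRYPTO requests carried as struct pohmelfs_ctl or struct pohmelfs_crypto payloads. For reference, a minimal userspace sender could look like the sketch below. It is not the actual POHMELFS configuration utility: the struct layout is assumed to be copied from netfs.h, the address and port are placeholders, reply parsing (struct pohmelfs_cn_ack) is omitted, and the callback additionally requires CAP_SYS_ADMIN.

/* Hypothetical userspace sketch, not part of this commit. */
#include <linux/connector.h>
#include <linux/netlink.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

#define POHMELFS_CN_IDX		5
#define POHMELFS_CN_VAL		0
#define POHMELFS_FLAGS_ADD	0

/* Copies of the wire structures from netfs.h (assumed layout). */
struct saddr {
	unsigned short		sa_family;
	char			addr[128];
};

struct pohmelfs_ctl {
	__u32			idx, type, proto;
	__u16			addrlen, perm, prio;
	struct saddr		addr;
};

int main(void)
{
	char buf[NLMSG_SPACE(sizeof(struct cn_msg) + sizeof(struct pohmelfs_ctl))];
	struct nlmsghdr *nlh = (struct nlmsghdr *)buf;
	struct cn_msg *msg = NLMSG_DATA(nlh);
	struct pohmelfs_ctl *ctl = (struct pohmelfs_ctl *)msg->data;
	struct sockaddr_in *sin = (struct sockaddr_in *)&ctl->addr;
	struct sockaddr_nl nl = { .nl_family = AF_NETLINK, .nl_groups = POHMELFS_CN_IDX };
	int s;

	memset(buf, 0, sizeof(buf));

	s = socket(PF_NETLINK, SOCK_DGRAM, NETLINK_CONNECTOR);
	bind(s, (struct sockaddr *)&nl, sizeof(nl));	/* bind as in the connector samples */

	/* Connector header: routes the payload to pohmelfs_cn_callback(). */
	msg->id.idx = POHMELFS_CN_IDX;
	msg->id.val = POHMELFS_CN_VAL;
	msg->flags = POHMELFS_FLAGS_ADD;
	msg->len = sizeof(struct pohmelfs_ctl);

	/* One TCP server for config group 1, readable and writable. */
	ctl->idx = 1;
	ctl->type = SOCK_STREAM;
	ctl->proto = IPPROTO_TCP;
	ctl->addrlen = sizeof(struct sockaddr_in);
	ctl->perm = 3;				/* POHMELFS_IO_PERM_READ | _WRITE */
	ctl->prio = 0;
	sin->sin_family = AF_INET;
	sin->sin_port = htons(1025);		/* placeholder server address */
	sin->sin_addr.s_addr = htonl(INADDR_LOOPBACK);

	nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct cn_msg) + msg->len);
	nlh->nlmsg_type = NLMSG_DONE;

	send(s, nlh, nlh->nlmsg_len, 0);
	close(s);
	return 0;
}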


@@ -1,878 +0,0 @@
/*
* 2007+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/crypto.h>
#include <linux/highmem.h>
#include <linux/kthread.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include "netfs.h"
static struct crypto_hash *pohmelfs_init_hash(struct pohmelfs_sb *psb)
{
int err;
struct crypto_hash *hash;
hash = crypto_alloc_hash(psb->hash_string, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(hash)) {
err = PTR_ERR(hash);
dprintk("%s: idx: %u: failed to allocate hash '%s', err: %d.\n",
__func__, psb->idx, psb->hash_string, err);
goto err_out_exit;
}
psb->crypto_attached_size = crypto_hash_digestsize(hash);
if (!psb->hash_keysize)
return hash;
err = crypto_hash_setkey(hash, psb->hash_key, psb->hash_keysize);
if (err) {
dprintk("%s: idx: %u: failed to set key for hash '%s', err: %d.\n",
__func__, psb->idx, psb->hash_string, err);
goto err_out_free;
}
return hash;
err_out_free:
crypto_free_hash(hash);
err_out_exit:
return ERR_PTR(err);
}
static struct crypto_ablkcipher *pohmelfs_init_cipher(struct pohmelfs_sb *psb)
{
int err = -EINVAL;
struct crypto_ablkcipher *cipher;
if (!psb->cipher_keysize)
goto err_out_exit;
cipher = crypto_alloc_ablkcipher(psb->cipher_string, 0, 0);
if (IS_ERR(cipher)) {
err = PTR_ERR(cipher);
dprintk("%s: idx: %u: failed to allocate cipher '%s', err: %d.\n",
__func__, psb->idx, psb->cipher_string, err);
goto err_out_exit;
}
crypto_ablkcipher_clear_flags(cipher, ~0);
err = crypto_ablkcipher_setkey(cipher, psb->cipher_key, psb->cipher_keysize);
if (err) {
dprintk("%s: idx: %u: failed to set key for cipher '%s', err: %d.\n",
__func__, psb->idx, psb->cipher_string, err);
goto err_out_free;
}
return cipher;
err_out_free:
crypto_free_ablkcipher(cipher);
err_out_exit:
return ERR_PTR(err);
}
int pohmelfs_crypto_engine_init(struct pohmelfs_crypto_engine *e, struct pohmelfs_sb *psb)
{
int err;
e->page_num = 0;
e->size = PAGE_SIZE;
e->data = kmalloc(e->size, GFP_KERNEL);
if (!e->data) {
err = -ENOMEM;
goto err_out_exit;
}
if (psb->hash_string) {
e->hash = pohmelfs_init_hash(psb);
if (IS_ERR(e->hash)) {
err = PTR_ERR(e->hash);
e->hash = NULL;
goto err_out_free;
}
}
if (psb->cipher_string) {
e->cipher = pohmelfs_init_cipher(psb);
if (IS_ERR(e->cipher)) {
err = PTR_ERR(e->cipher);
e->cipher = NULL;
goto err_out_free_hash;
}
}
return 0;
err_out_free_hash:
crypto_free_hash(e->hash);
err_out_free:
kfree(e->data);
err_out_exit:
return err;
}
void pohmelfs_crypto_engine_exit(struct pohmelfs_crypto_engine *e)
{
crypto_free_hash(e->hash);
crypto_free_ablkcipher(e->cipher);
kfree(e->data);
}
static void pohmelfs_crypto_complete(struct crypto_async_request *req, int err)
{
struct pohmelfs_crypto_completion *c = req->data;
if (err == -EINPROGRESS)
return;
dprintk("%s: req: %p, err: %d.\n", __func__, req, err);
c->error = err;
complete(&c->complete);
}
static int pohmelfs_crypto_process(struct ablkcipher_request *req,
struct scatterlist *sg_dst, struct scatterlist *sg_src,
void *iv, int enc, unsigned long timeout)
{
struct pohmelfs_crypto_completion complete;
int err;
init_completion(&complete.complete);
complete.error = -EINPROGRESS;
ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
pohmelfs_crypto_complete, &complete);
ablkcipher_request_set_crypt(req, sg_src, sg_dst, sg_src->length, iv);
if (enc)
err = crypto_ablkcipher_encrypt(req);
else
err = crypto_ablkcipher_decrypt(req);
switch (err) {
case -EINPROGRESS:
case -EBUSY:
err = wait_for_completion_interruptible_timeout(&complete.complete,
timeout);
if (!err)
err = -ETIMEDOUT;
else if (err > 0)
err = complete.error;
break;
default:
break;
}
return err;
}
int pohmelfs_crypto_process_input_data(struct pohmelfs_crypto_engine *e, u64 cmd_iv,
void *data, struct page *page, unsigned int size)
{
int err;
struct scatterlist sg;
if (!e->cipher && !e->hash)
return 0;
dprintk("%s: eng: %p, iv: %llx, data: %p, page: %p/%lu, size: %u.\n",
__func__, e, cmd_iv, data, page, (page) ? page->index : 0, size);
if (data) {
sg_init_one(&sg, data, size);
} else {
sg_init_table(&sg, 1);
sg_set_page(&sg, page, size, 0);
}
if (e->cipher) {
struct ablkcipher_request *req = e->data + crypto_hash_digestsize(e->hash);
u8 iv[32];
memset(iv, 0, sizeof(iv));
memcpy(iv, &cmd_iv, sizeof(cmd_iv));
ablkcipher_request_set_tfm(req, e->cipher);
err = pohmelfs_crypto_process(req, &sg, &sg, iv, 0, e->timeout);
if (err)
goto err_out_exit;
}
if (e->hash) {
struct hash_desc desc;
void *dst = e->data + e->size/2;
desc.tfm = e->hash;
desc.flags = 0;
err = crypto_hash_init(&desc);
if (err)
goto err_out_exit;
err = crypto_hash_update(&desc, &sg, size);
if (err)
goto err_out_exit;
err = crypto_hash_final(&desc, dst);
if (err)
goto err_out_exit;
err = !!memcmp(dst, e->data, crypto_hash_digestsize(e->hash));
if (err) {
#ifdef CONFIG_POHMELFS_DEBUG
unsigned int i;
unsigned char *recv = e->data, *calc = dst;
dprintk("%s: eng: %p, hash: %p, cipher: %p: iv : %llx, hash mismatch (recv/calc): ",
__func__, e, e->hash, e->cipher, cmd_iv);
for (i = 0; i < crypto_hash_digestsize(e->hash); ++i) {
#if 0
dprintka("%02x ", recv[i]);
if (recv[i] != calc[i]) {
dprintka("| calc byte: %02x.\n", calc[i]);
break;
}
#else
dprintka("%02x/%02x ", recv[i], calc[i]);
#endif
}
dprintk("\n");
#endif
goto err_out_exit;
} else {
dprintk("%s: eng: %p, hash: %p, cipher: %p: hashes matched.\n",
__func__, e, e->hash, e->cipher);
}
}
dprintk("%s: eng: %p, size: %u, hash: %p, cipher: %p: completed.\n",
__func__, e, e->size, e->hash, e->cipher);
return 0;
err_out_exit:
dprintk("%s: eng: %p, hash: %p, cipher: %p: err: %d.\n",
__func__, e, e->hash, e->cipher, err);
return err;
}
static int pohmelfs_trans_iter(struct netfs_trans *t, struct pohmelfs_crypto_engine *e,
int (*iterator) (struct pohmelfs_crypto_engine *e,
struct scatterlist *dst,
struct scatterlist *src))
{
void *data = t->iovec.iov_base + sizeof(struct netfs_cmd) + t->psb->crypto_attached_size;
unsigned int size = t->iovec.iov_len - sizeof(struct netfs_cmd) - t->psb->crypto_attached_size;
struct netfs_cmd *cmd = data;
unsigned int sz, pages = t->attached_pages, i, csize, cmd_cmd, dpage_idx;
struct scatterlist sg_src, sg_dst;
int err;
while (size) {
cmd = data;
cmd_cmd = __be16_to_cpu(cmd->cmd);
csize = __be32_to_cpu(cmd->size);
cmd->iv = __cpu_to_be64(e->iv);
if (cmd_cmd == NETFS_READ_PAGES || cmd_cmd == NETFS_READ_PAGE)
csize = __be16_to_cpu(cmd->ext);
sz = csize + __be16_to_cpu(cmd->cpad) + sizeof(struct netfs_cmd);
dprintk("%s: size: %u, sz: %u, cmd_size: %u, cmd_cpad: %u.\n",
__func__, size, sz, __be32_to_cpu(cmd->size), __be16_to_cpu(cmd->cpad));
data += sz;
size -= sz;
sg_init_one(&sg_src, cmd->data, sz - sizeof(struct netfs_cmd));
sg_init_one(&sg_dst, cmd->data, sz - sizeof(struct netfs_cmd));
err = iterator(e, &sg_dst, &sg_src);
if (err)
return err;
}
if (!pages)
return 0;
dpage_idx = 0;
for (i = 0; i < t->page_num; ++i) {
struct page *page = t->pages[i];
struct page *dpage = e->pages[dpage_idx];
if (!page)
continue;
sg_init_table(&sg_src, 1);
sg_init_table(&sg_dst, 1);
sg_set_page(&sg_src, page, page_private(page), 0);
sg_set_page(&sg_dst, dpage, page_private(page), 0);
err = iterator(e, &sg_dst, &sg_src);
if (err)
return err;
pages--;
if (!pages)
break;
dpage_idx++;
}
return 0;
}
static int pohmelfs_encrypt_iterator(struct pohmelfs_crypto_engine *e,
struct scatterlist *sg_dst, struct scatterlist *sg_src)
{
struct ablkcipher_request *req = e->data;
u8 iv[32];
memset(iv, 0, sizeof(iv));
memcpy(iv, &e->iv, sizeof(e->iv));
return pohmelfs_crypto_process(req, sg_dst, sg_src, iv, 1, e->timeout);
}
static int pohmelfs_encrypt(struct pohmelfs_crypto_thread *tc)
{
struct netfs_trans *t = tc->trans;
struct pohmelfs_crypto_engine *e = &tc->eng;
struct ablkcipher_request *req = e->data;
memset(req, 0, sizeof(struct ablkcipher_request));
ablkcipher_request_set_tfm(req, e->cipher);
e->iv = pohmelfs_gen_iv(t);
return pohmelfs_trans_iter(t, e, pohmelfs_encrypt_iterator);
}
static int pohmelfs_hash_iterator(struct pohmelfs_crypto_engine *e,
struct scatterlist *sg_dst, struct scatterlist *sg_src)
{
return crypto_hash_update(e->data, sg_src, sg_src->length);
}
static int pohmelfs_hash(struct pohmelfs_crypto_thread *tc)
{
struct pohmelfs_crypto_engine *e = &tc->eng;
struct hash_desc *desc = e->data;
unsigned char *dst = tc->trans->iovec.iov_base + sizeof(struct netfs_cmd);
int err;
desc->tfm = e->hash;
desc->flags = 0;
err = crypto_hash_init(desc);
if (err)
return err;
err = pohmelfs_trans_iter(tc->trans, e, pohmelfs_hash_iterator);
if (err)
return err;
err = crypto_hash_final(desc, dst);
if (err)
return err;
{
unsigned int i;
dprintk("%s: ", __func__);
for (i = 0; i < tc->psb->crypto_attached_size; ++i)
dprintka("%02x ", dst[i]);
dprintka("\n");
}
return 0;
}
static void pohmelfs_crypto_pages_free(struct pohmelfs_crypto_engine *e)
{
unsigned int i;
for (i = 0; i < e->page_num; ++i)
__free_page(e->pages[i]);
kfree(e->pages);
}
static int pohmelfs_crypto_pages_alloc(struct pohmelfs_crypto_engine *e, struct pohmelfs_sb *psb)
{
unsigned int i;
e->pages = kmalloc(psb->trans_max_pages * sizeof(struct page *), GFP_KERNEL);
if (!e->pages)
return -ENOMEM;
for (i = 0; i < psb->trans_max_pages; ++i) {
e->pages[i] = alloc_page(GFP_KERNEL);
if (!e->pages[i])
break;
}
e->page_num = i;
if (!e->page_num)
goto err_out_free;
return 0;
err_out_free:
kfree(e->pages);
return -ENOMEM;
}
static void pohmelfs_sys_crypto_exit_one(struct pohmelfs_crypto_thread *t)
{
struct pohmelfs_sb *psb = t->psb;
if (t->thread)
kthread_stop(t->thread);
mutex_lock(&psb->crypto_thread_lock);
list_del(&t->thread_entry);
psb->crypto_thread_num--;
mutex_unlock(&psb->crypto_thread_lock);
pohmelfs_crypto_engine_exit(&t->eng);
pohmelfs_crypto_pages_free(&t->eng);
kfree(t);
}
static int pohmelfs_crypto_finish(struct netfs_trans *t, struct pohmelfs_sb *psb, int err)
{
struct netfs_cmd *cmd = t->iovec.iov_base;
netfs_convert_cmd(cmd);
if (likely(!err))
err = netfs_trans_finish_send(t, psb);
t->result = err;
netfs_trans_put(t);
return err;
}
void pohmelfs_crypto_thread_make_ready(struct pohmelfs_crypto_thread *th)
{
struct pohmelfs_sb *psb = th->psb;
th->page = NULL;
th->trans = NULL;
mutex_lock(&psb->crypto_thread_lock);
list_move_tail(&th->thread_entry, &psb->crypto_ready_list);
mutex_unlock(&psb->crypto_thread_lock);
wake_up(&psb->wait);
}
static int pohmelfs_crypto_thread_trans(struct pohmelfs_crypto_thread *t)
{
struct netfs_trans *trans;
int err = 0;
trans = t->trans;
trans->eng = NULL;
if (t->eng.hash) {
err = pohmelfs_hash(t);
if (err)
goto out_complete;
}
if (t->eng.cipher) {
err = pohmelfs_encrypt(t);
if (err)
goto out_complete;
trans->eng = &t->eng;
}
out_complete:
t->page = NULL;
t->trans = NULL;
if (!trans->eng)
pohmelfs_crypto_thread_make_ready(t);
pohmelfs_crypto_finish(trans, t->psb, err);
return err;
}
static int pohmelfs_crypto_thread_page(struct pohmelfs_crypto_thread *t)
{
struct pohmelfs_crypto_engine *e = &t->eng;
struct page *page = t->page;
int err;
WARN_ON(!PageChecked(page));
err = pohmelfs_crypto_process_input_data(e, e->iv, NULL, page, t->size);
if (!err)
SetPageUptodate(page);
else
SetPageError(page);
unlock_page(page);
page_cache_release(page);
pohmelfs_crypto_thread_make_ready(t);
return err;
}
static int pohmelfs_crypto_thread_func(void *data)
{
struct pohmelfs_crypto_thread *t = data;
while (!kthread_should_stop()) {
wait_event_interruptible(t->wait, kthread_should_stop() ||
t->trans || t->page);
if (kthread_should_stop())
break;
if (!t->trans && !t->page)
continue;
dprintk("%s: thread: %p, trans: %p, page: %p.\n",
__func__, t, t->trans, t->page);
if (t->trans)
pohmelfs_crypto_thread_trans(t);
else if (t->page)
pohmelfs_crypto_thread_page(t);
}
return 0;
}
static void pohmelfs_crypto_flush(struct pohmelfs_sb *psb, struct list_head *head)
{
while (!list_empty(head)) {
struct pohmelfs_crypto_thread *t = NULL;
mutex_lock(&psb->crypto_thread_lock);
if (!list_empty(head)) {
t = list_first_entry(head, struct pohmelfs_crypto_thread, thread_entry);
list_del_init(&t->thread_entry);
}
mutex_unlock(&psb->crypto_thread_lock);
if (t)
pohmelfs_sys_crypto_exit_one(t);
}
}
static void pohmelfs_sys_crypto_exit(struct pohmelfs_sb *psb)
{
while (!list_empty(&psb->crypto_active_list) || !list_empty(&psb->crypto_ready_list)) {
dprintk("%s: crypto_thread_num: %u.\n", __func__, psb->crypto_thread_num);
pohmelfs_crypto_flush(psb, &psb->crypto_active_list);
pohmelfs_crypto_flush(psb, &psb->crypto_ready_list);
}
}
static int pohmelfs_sys_crypto_init(struct pohmelfs_sb *psb)
{
unsigned int i;
struct pohmelfs_crypto_thread *t;
struct pohmelfs_config *c;
struct netfs_state *st;
int err;
list_for_each_entry(c, &psb->state_list, config_entry) {
st = &c->state;
err = pohmelfs_crypto_engine_init(&st->eng, psb);
if (err)
goto err_out_exit;
dprintk("%s: st: %p, eng: %p, hash: %p, cipher: %p.\n",
__func__, st, &st->eng, &st->eng.hash, &st->eng.cipher);
}
for (i = 0; i < psb->crypto_thread_num; ++i) {
err = -ENOMEM;
t = kzalloc(sizeof(struct pohmelfs_crypto_thread), GFP_KERNEL);
if (!t)
goto err_out_free_state_engines;
init_waitqueue_head(&t->wait);
t->psb = psb;
t->trans = NULL;
t->eng.thread = t;
err = pohmelfs_crypto_engine_init(&t->eng, psb);
if (err)
goto err_out_free_state_engines;
err = pohmelfs_crypto_pages_alloc(&t->eng, psb);
if (err)
goto err_out_free;
t->thread = kthread_run(pohmelfs_crypto_thread_func, t,
"pohmelfs-crypto-%d-%d", psb->idx, i);
if (IS_ERR(t->thread)) {
err = PTR_ERR(t->thread);
t->thread = NULL;
goto err_out_free;
}
if (t->eng.cipher)
psb->crypto_align_size = crypto_ablkcipher_blocksize(t->eng.cipher);
mutex_lock(&psb->crypto_thread_lock);
list_add_tail(&t->thread_entry, &psb->crypto_ready_list);
mutex_unlock(&psb->crypto_thread_lock);
}
psb->crypto_thread_num = i;
return 0;
err_out_free:
pohmelfs_sys_crypto_exit_one(t);
err_out_free_state_engines:
list_for_each_entry(c, &psb->state_list, config_entry) {
st = &c->state;
pohmelfs_crypto_engine_exit(&st->eng);
}
err_out_exit:
pohmelfs_sys_crypto_exit(psb);
return err;
}
void pohmelfs_crypto_exit(struct pohmelfs_sb *psb)
{
pohmelfs_sys_crypto_exit(psb);
kfree(psb->hash_string);
kfree(psb->cipher_string);
}
static int pohmelfs_crypt_init_complete(struct page **pages, unsigned int page_num,
void *private, int err)
{
struct pohmelfs_sb *psb = private;
psb->flags = -err;
dprintk("%s: err: %d.\n", __func__, err);
wake_up(&psb->wait);
return err;
}
static int pohmelfs_crypto_init_handshake(struct pohmelfs_sb *psb)
{
struct netfs_trans *t;
struct netfs_crypto_capabilities *cap;
struct netfs_cmd *cmd;
char *str;
int err = -ENOMEM, size;
size = sizeof(struct netfs_crypto_capabilities) +
psb->cipher_strlen + psb->hash_strlen + 2; /* 0 bytes */
t = netfs_trans_alloc(psb, size, 0, 0);
if (!t)
goto err_out_exit;
t->complete = pohmelfs_crypt_init_complete;
t->private = psb;
cmd = netfs_trans_current(t);
cap = (struct netfs_crypto_capabilities *)(cmd + 1);
str = (char *)(cap + 1);
cmd->cmd = NETFS_CAPABILITIES;
cmd->id = POHMELFS_CRYPTO_CAPABILITIES;
cmd->size = size;
cmd->start = 0;
cmd->ext = 0;
cmd->csize = 0;
netfs_convert_cmd(cmd);
netfs_trans_update(cmd, t, size);
cap->hash_strlen = psb->hash_strlen;
if (cap->hash_strlen) {
sprintf(str, "%s", psb->hash_string);
str += cap->hash_strlen;
}
cap->cipher_strlen = psb->cipher_strlen;
cap->cipher_keysize = psb->cipher_keysize;
if (cap->cipher_strlen)
sprintf(str, "%s", psb->cipher_string);
netfs_convert_crypto_capabilities(cap);
psb->flags = ~0;
err = netfs_trans_finish(t, psb);
if (err)
goto err_out_exit;
err = wait_event_interruptible_timeout(psb->wait, (psb->flags != ~0),
psb->wait_on_page_timeout);
if (!err)
err = -ETIMEDOUT;
else if (err > 0)
err = -psb->flags;
if (!err)
psb->perform_crypto = 1;
psb->flags = 0;
/*
* At this point NETFS_CAPABILITIES response command
* should setup superblock in a way, which is acceptable
* for both client and server, so if server refuses connection,
* it will send error in transaction response.
*/
if (err)
goto err_out_exit;
return 0;
err_out_exit:
return err;
}
int pohmelfs_crypto_init(struct pohmelfs_sb *psb)
{
int err;
if (!psb->cipher_string && !psb->hash_string)
return 0;
err = pohmelfs_crypto_init_handshake(psb);
if (err)
return err;
err = pohmelfs_sys_crypto_init(psb);
if (err)
return err;
return 0;
}
static int pohmelfs_crypto_thread_get(struct pohmelfs_sb *psb,
int (*action)(struct pohmelfs_crypto_thread *t, void *data), void *data)
{
struct pohmelfs_crypto_thread *t = NULL;
int err;
while (!t) {
err = wait_event_interruptible_timeout(psb->wait,
!list_empty(&psb->crypto_ready_list),
psb->wait_on_page_timeout);
t = NULL;
err = 0;
mutex_lock(&psb->crypto_thread_lock);
if (!list_empty(&psb->crypto_ready_list)) {
t = list_entry(psb->crypto_ready_list.prev,
struct pohmelfs_crypto_thread,
thread_entry);
list_move_tail(&t->thread_entry,
&psb->crypto_active_list);
action(t, data);
wake_up(&t->wait);
}
mutex_unlock(&psb->crypto_thread_lock);
}
return err;
}
static int pohmelfs_trans_crypt_action(struct pohmelfs_crypto_thread *t, void *data)
{
struct netfs_trans *trans = data;
netfs_trans_get(trans);
t->trans = trans;
dprintk("%s: t: %p, gen: %u, thread: %p.\n", __func__, trans, trans->gen, t);
return 0;
}
int pohmelfs_trans_crypt(struct netfs_trans *trans, struct pohmelfs_sb *psb)
{
if ((!psb->hash_string && !psb->cipher_string) || !psb->perform_crypto) {
netfs_trans_get(trans);
return pohmelfs_crypto_finish(trans, psb, 0);
}
return pohmelfs_crypto_thread_get(psb, pohmelfs_trans_crypt_action, trans);
}
struct pohmelfs_crypto_input_action_data {
struct page *page;
struct pohmelfs_crypto_engine *e;
u64 iv;
unsigned int size;
};
static int pohmelfs_crypt_input_page_action(struct pohmelfs_crypto_thread *t, void *data)
{
struct pohmelfs_crypto_input_action_data *act = data;
memcpy(t->eng.data, act->e->data, t->psb->crypto_attached_size);
t->size = act->size;
t->eng.iv = act->iv;
t->page = act->page;
return 0;
}
int pohmelfs_crypto_process_input_page(struct pohmelfs_crypto_engine *e,
struct page *page, unsigned int size, u64 iv)
{
struct inode *inode = page->mapping->host;
struct pohmelfs_crypto_input_action_data act;
int err = -ENOENT;
act.page = page;
act.e = e;
act.size = size;
act.iv = iv;
err = pohmelfs_crypto_thread_get(POHMELFS_SB(inode->i_sb),
pohmelfs_crypt_input_page_action, &act);
if (err)
goto err_out_exit;
return 0;
err_out_exit:
SetPageUptodate(page);
page_cache_release(page);
return err;
}
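The receive path above keeps a digest of crypto_attached_size bytes right after the command header and recomputes it with the legacy crypto_hash API before accepting the payload. A self-contained sketch of that recomputation, using the same since-removed API and a placeholder "hmac(sha1)" transform with a placeholder key, might look as follows; it is illustrative only and mirrors pohmelfs_init_hash() plus the hashing branch of pohmelfs_crypto_process_input_data().

/*
 * Illustrative sketch only: same legacy kernel crypto API as the code above
 * (crypto_hash was later replaced by crypto_shash/ahash). The transform name
 * and key are placeholders; 'out' must hold crypto_hash_digestsize() bytes.
 */
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int pohmelfs_example_digest(const u8 *key, unsigned int keysize,
				   void *data, unsigned int size, u8 *out)
{
	struct crypto_hash *tfm;
	struct hash_desc desc;
	struct scatterlist sg;
	int err;

	tfm = crypto_alloc_hash("hmac(sha1)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_hash_setkey(tfm, key, keysize);
	if (err)
		goto out_free;

	desc.tfm = tfm;
	desc.flags = 0;

	sg_init_one(&sg, data, size);

	/* One-shot variant of the init/update/final sequence used above. */
	err = crypto_hash_digest(&desc, &sg, size, out);

out_free:
	crypto_free_hash(tfm);
	return err;
}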

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@@ -1,182 +0,0 @@
/*
* 2007+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/fsnotify.h>
#include <linux/mempool.h>
#include "netfs.h"
static int pohmelfs_send_lock_trans(struct pohmelfs_inode *pi,
u64 id, u64 start, u32 size, int type)
{
struct inode *inode = &pi->vfs_inode;
struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb);
struct netfs_trans *t;
struct netfs_cmd *cmd;
int path_len, err;
void *data;
struct netfs_lock *l;
int isize = (type & POHMELFS_LOCK_GRAB) ? 0 : sizeof(struct netfs_inode_info);
err = pohmelfs_path_length(pi);
if (err < 0)
goto err_out_exit;
path_len = err;
err = -ENOMEM;
t = netfs_trans_alloc(psb, path_len + sizeof(struct netfs_lock) + isize,
NETFS_TRANS_SINGLE_DST, 0);
if (!t)
goto err_out_exit;
cmd = netfs_trans_current(t);
data = cmd + 1;
err = pohmelfs_construct_path_string(pi, data, path_len);
if (err < 0)
goto err_out_free;
path_len = err;
l = data + path_len;
l->start = start;
l->size = size;
l->type = type;
l->ino = pi->ino;
cmd->cmd = NETFS_LOCK;
cmd->start = 0;
cmd->id = id;
cmd->size = sizeof(struct netfs_lock) + path_len + isize;
cmd->ext = path_len;
cmd->csize = 0;
netfs_convert_cmd(cmd);
netfs_convert_lock(l);
if (isize) {
struct netfs_inode_info *info = (struct netfs_inode_info *)(l + 1);
info->mode = inode->i_mode;
info->nlink = inode->i_nlink;
info->uid = inode->i_uid;
info->gid = inode->i_gid;
info->blocks = inode->i_blocks;
info->rdev = inode->i_rdev;
info->size = inode->i_size;
info->version = inode->i_version;
netfs_convert_inode_info(info);
}
netfs_trans_update(cmd, t, path_len + sizeof(struct netfs_lock) + isize);
return netfs_trans_finish(t, psb);
err_out_free:
netfs_trans_free(t);
err_out_exit:
printk("%s: err: %d.\n", __func__, err);
return err;
}
int pohmelfs_data_lock(struct pohmelfs_inode *pi, u64 start, u32 size, int type)
{
struct pohmelfs_sb *psb = POHMELFS_SB(pi->vfs_inode.i_sb);
struct pohmelfs_mcache *m;
int err = -ENOMEM;
struct iattr iattr;
struct inode *inode = &pi->vfs_inode;
dprintk("%s: %p: ino: %llu, start: %llu, size: %u, "
"type: %d, locked as: %d, owned: %d.\n",
__func__, &pi->vfs_inode, pi->ino,
start, size, type, pi->lock_type,
!!test_bit(NETFS_INODE_OWNED, &pi->state));
if (!pohmelfs_need_lock(pi, type))
return 0;
m = pohmelfs_mcache_alloc(psb, start, size, NULL);
if (IS_ERR(m))
return PTR_ERR(m);
err = pohmelfs_send_lock_trans(pi, m->gen, start, size,
type | POHMELFS_LOCK_GRAB);
if (err)
goto err_out_put;
err = wait_for_completion_timeout(&m->complete, psb->mcache_timeout);
if (err)
err = m->err;
else
err = -ETIMEDOUT;
if (err) {
printk("%s: %p: ino: %llu, mgen: %llu, start: %llu, size: %u, err: %d.\n",
__func__, &pi->vfs_inode, pi->ino, m->gen, start, size, err);
}
if (err && (err != -ENOENT))
goto err_out_put;
if (!err) {
netfs_convert_inode_info(&m->info);
iattr.ia_valid = ATTR_MODE | ATTR_UID | ATTR_GID | ATTR_SIZE | ATTR_ATIME;
iattr.ia_mode = m->info.mode;
iattr.ia_uid = m->info.uid;
iattr.ia_gid = m->info.gid;
iattr.ia_size = m->info.size;
iattr.ia_atime = CURRENT_TIME;
dprintk("%s: %p: ino: %llu, mgen: %llu, start: %llu, isize: %llu -> %llu.\n",
__func__, &pi->vfs_inode, pi->ino, m->gen, start, inode->i_size, m->info.size);
err = pohmelfs_setattr_raw(inode, &iattr);
if (!err) {
struct dentry *dentry = d_find_alias(inode);
if (dentry) {
fsnotify_change(dentry, iattr.ia_valid);
dput(dentry);
}
}
}
pi->lock_type = type;
set_bit(NETFS_INODE_OWNED, &pi->state);
pohmelfs_mcache_put(psb, m);
return 0;
err_out_put:
pohmelfs_mcache_put(psb, m);
return err;
}
int pohmelfs_data_unlock(struct pohmelfs_inode *pi, u64 start, u32 size, int type)
{
dprintk("%s: %p: ino: %llu, start: %llu, size: %u, type: %d.\n",
__func__, &pi->vfs_inode, pi->ino, start, size, type);
pi->lock_type = 0;
clear_bit(NETFS_INODE_REMOTE_DIR_SYNCED, &pi->state);
clear_bit(NETFS_INODE_OWNED, &pi->state);
return pohmelfs_send_lock_trans(pi, pi->ino, start, size, type);
}
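These two entry points are how the rest of the filesystem takes ownership of an inode before touching it and hands ownership back afterwards. A hypothetical caller, not taken from this commit, would follow the pattern below.

/*
 * Hypothetical caller sketch, for illustration only; assumes the same
 * "netfs.h" include as the file above. Take the distributed lock around an
 * update and release it when ownership is no longer needed.
 */
static int pohmelfs_example_locked_update(struct pohmelfs_inode *pi)
{
	/* Lock the whole object for writing: offset 0, maximal size. */
	int err = pohmelfs_data_lock(pi, 0, ~0, POHMELFS_WRITE_LOCK);

	if (err)
		return err;

	/* ... modify pi->vfs_inode and/or its data here ... */

	/* Hand ownership back so other clients can lock the inode again. */
	return pohmelfs_data_unlock(pi, 0, ~0, POHMELFS_WRITE_LOCK);
}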


@@ -1,171 +0,0 @@
/*
* 2007+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include "netfs.h"
static struct kmem_cache *pohmelfs_mcache_cache;
static mempool_t *pohmelfs_mcache_pool;
static inline int pohmelfs_mcache_cmp(u64 gen, u64 new)
{
if (gen < new)
return 1;
if (gen > new)
return -1;
return 0;
}
struct pohmelfs_mcache *pohmelfs_mcache_search(struct pohmelfs_sb *psb, u64 gen)
{
struct rb_root *root = &psb->mcache_root;
struct rb_node *n = root->rb_node;
struct pohmelfs_mcache *tmp, *ret = NULL;
int cmp;
while (n) {
tmp = rb_entry(n, struct pohmelfs_mcache, mcache_entry);
cmp = pohmelfs_mcache_cmp(tmp->gen, gen);
if (cmp < 0)
n = n->rb_left;
else if (cmp > 0)
n = n->rb_right;
else {
ret = tmp;
pohmelfs_mcache_get(ret);
break;
}
}
return ret;
}
static int pohmelfs_mcache_insert(struct pohmelfs_sb *psb, struct pohmelfs_mcache *m)
{
struct rb_root *root = &psb->mcache_root;
struct rb_node **n = &root->rb_node, *parent = NULL;
struct pohmelfs_mcache *ret = NULL, *tmp;
int cmp;
while (*n) {
parent = *n;
tmp = rb_entry(parent, struct pohmelfs_mcache, mcache_entry);
cmp = pohmelfs_mcache_cmp(tmp->gen, m->gen);
if (cmp < 0)
n = &parent->rb_left;
else if (cmp > 0)
n = &parent->rb_right;
else {
ret = tmp;
break;
}
}
if (ret)
return -EEXIST;
rb_link_node(&m->mcache_entry, parent, n);
rb_insert_color(&m->mcache_entry, root);
return 0;
}
static int pohmelfs_mcache_remove(struct pohmelfs_sb *psb, struct pohmelfs_mcache *m)
{
if (m && m->mcache_entry.rb_parent_color) {
rb_erase(&m->mcache_entry, &psb->mcache_root);
m->mcache_entry.rb_parent_color = 0;
return 1;
}
return 0;
}
void pohmelfs_mcache_remove_locked(struct pohmelfs_sb *psb, struct pohmelfs_mcache *m)
{
mutex_lock(&psb->mcache_lock);
pohmelfs_mcache_remove(psb, m);
mutex_unlock(&psb->mcache_lock);
}
struct pohmelfs_mcache *pohmelfs_mcache_alloc(struct pohmelfs_sb *psb, u64 start,
unsigned int size, void *data)
{
struct pohmelfs_mcache *m;
int err = -ENOMEM;
m = mempool_alloc(pohmelfs_mcache_pool, GFP_KERNEL);
if (!m)
goto err_out_exit;
init_completion(&m->complete);
m->err = 0;
atomic_set(&m->refcnt, 1);
m->data = data;
m->start = start;
m->size = size;
m->gen = atomic_long_inc_return(&psb->mcache_gen);
mutex_lock(&psb->mcache_lock);
err = pohmelfs_mcache_insert(psb, m);
mutex_unlock(&psb->mcache_lock);
if (err)
goto err_out_free;
return m;
err_out_free:
mempool_free(m, pohmelfs_mcache_pool);
err_out_exit:
return ERR_PTR(err);
}
void pohmelfs_mcache_free(struct pohmelfs_sb *psb, struct pohmelfs_mcache *m)
{
pohmelfs_mcache_remove_locked(psb, m);
mempool_free(m, pohmelfs_mcache_pool);
}
int __init pohmelfs_mcache_init(void)
{
pohmelfs_mcache_cache = kmem_cache_create("pohmelfs_mcache_cache",
sizeof(struct pohmelfs_mcache),
0, (SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD), NULL);
if (!pohmelfs_mcache_cache)
goto err_out_exit;
pohmelfs_mcache_pool = mempool_create_slab_pool(256, pohmelfs_mcache_cache);
if (!pohmelfs_mcache_pool)
goto err_out_free;
return 0;
err_out_free:
kmem_cache_destroy(pohmelfs_mcache_cache);
err_out_exit:
return -ENOMEM;
}
void pohmelfs_mcache_exit(void)
{
mempool_destroy(pohmelfs_mcache_pool);
kmem_cache_destroy(pohmelfs_mcache_cache);
}
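This cache is the rendezvous point between a sleeping requester and the network receive path: the requester allocates an entry, sends m->gen as the command id and waits on m->complete (see pohmelfs_data_lock() above), while the receive side looks the generation number up and completes the entry. That completion side is not part of this excerpt; the sketch below is only an illustration of how the pieces fit together, with a hypothetical function name and parameters.

/*
 * Illustrative sketch only, not the actual receive-path code; assumes the
 * same "netfs.h" include as the file above. Completes a waiter that
 * registered its request under 'gen' before sending it.
 */
static void pohmelfs_example_complete(struct pohmelfs_sb *psb, u64 gen,
				      struct netfs_inode_info *info, int err)
{
	struct pohmelfs_mcache *m;

	m = pohmelfs_mcache_search(psb, gen);	/* grabs a reference */
	if (!m)
		return;				/* waiter already gave up */

	if (info)
		memcpy(&m->info, info, sizeof(struct netfs_inode_info));
	m->err = err;

	complete(&m->complete);			/* wake the waiter */
	pohmelfs_mcache_put(psb, m);		/* drop the search reference */
}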

File diff suppressed because it is too large.


@@ -1,919 +0,0 @@
/*
* 2007+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __NETFS_H
#define __NETFS_H
#include <linux/types.h>
#include <linux/connector.h>
#include <linux/backing-dev.h>
#define POHMELFS_CN_IDX 5
#define POHMELFS_CN_VAL 0
#define POHMELFS_CTLINFO_ACK 1
#define POHMELFS_NOINFO_ACK 2
#define POHMELFS_NULL_IDX 65535
/*
* Network command structure.
* Will be extended.
*/
struct netfs_cmd {
__u16 cmd; /* Command number */
__u16 csize; /* Attached crypto information size */
__u16 cpad; /* Attached padding size */
__u16 ext; /* External flags */
__u32 size; /* Size of the attached data */
__u32 trans; /* Transaction id */
__u64 id; /* Object ID to operate on. Used for feedback.*/
__u64 start; /* Start of the object. */
__u64 iv; /* IV sequence */
__u8 data[0];
};
static inline void netfs_convert_cmd(struct netfs_cmd *cmd)
{
cmd->id = __be64_to_cpu(cmd->id);
cmd->start = __be64_to_cpu(cmd->start);
cmd->iv = __be64_to_cpu(cmd->iv);
cmd->cmd = __be16_to_cpu(cmd->cmd);
cmd->ext = __be16_to_cpu(cmd->ext);
cmd->csize = __be16_to_cpu(cmd->csize);
cmd->cpad = __be16_to_cpu(cmd->cpad);
cmd->size = __be32_to_cpu(cmd->size);
}
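/*
 * Illustrative note, not part of the original header: senders fill the
 * command in CPU byte order and byte-swap it exactly once, right before
 * queueing, as pohmelfs_send_lock_trans() and the capabilities handshake do.
 * With placeholder values the pattern is:
 *
 *	struct netfs_cmd *cmd = netfs_trans_current(t);
 *	void *payload = cmd + 1;		// command-specific data
 *
 *	cmd->cmd   = NETFS_LOCK;		// any NETFS_* command
 *	cmd->id    = some_id;			// e.g. inode number or mcache gen
 *	cmd->start = 0;
 *	cmd->size  = payload_size;
 *	cmd->ext   = cmd->csize = 0;
 *
 *	netfs_convert_cmd(cmd);			// byte-swap for the wire
 *	netfs_trans_update(cmd, t, payload_size); // advance the iovec, set cpad
 */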
#define NETFS_TRANS_SINGLE_DST (1<<0)
enum {
NETFS_READDIR = 1, /* Read directory for given inode number */
NETFS_READ_PAGE, /* Read data page from the server */
NETFS_WRITE_PAGE, /* Write data page to the server */
NETFS_CREATE, /* Create directory entry */
NETFS_REMOVE, /* Remove directory entry */
NETFS_LOOKUP, /* Lookup single object */
NETFS_LINK, /* Create a link */
NETFS_TRANS, /* Transaction */
NETFS_OPEN, /* Open intent */
NETFS_INODE_INFO, /* Metadata cache coherency synchronization message */
NETFS_PAGE_CACHE, /* Page cache invalidation message */
NETFS_READ_PAGES, /* Read multiple contiguous pages in one go */
NETFS_RENAME, /* Rename object */
NETFS_CAPABILITIES, /* Capabilities of the client, for example supported crypto */
NETFS_LOCK, /* Distributed lock message */
NETFS_XATTR_SET, /* Set extended attribute */
NETFS_XATTR_GET, /* Get extended attribute */
NETFS_CMD_MAX
};
enum {
POHMELFS_FLAGS_ADD = 0, /* Network state control message for ADD */
POHMELFS_FLAGS_DEL, /* Network state control message for DEL */
POHMELFS_FLAGS_SHOW, /* Network state control message for SHOW */
POHMELFS_FLAGS_CRYPTO, /* Crypto data control message */
POHMELFS_FLAGS_MODIFY, /* Network state modification message */
POHMELFS_FLAGS_DUMP, /* Network state control message for SHOW ALL */
POHMELFS_FLAGS_FLUSH, /* Network state control message for FLUSH */
};
/*
* Always wanted to copy it from socket headers into public one,
* since they are __KERNEL__ protected there.
*/
#define _K_SS_MAXSIZE 128
struct saddr {
unsigned short sa_family;
char addr[_K_SS_MAXSIZE];
};
enum {
POHMELFS_CRYPTO_HASH = 0,
POHMELFS_CRYPTO_CIPHER,
};
struct pohmelfs_crypto {
unsigned int idx; /* Config index */
unsigned short strlen; /* Size of the attached crypto string including 0-byte
* "cbc(aes)" for example */
unsigned short type; /* HMAC, cipher, both */
unsigned int keysize; /* Key size */
unsigned char data[0]; /* Algorithm string, key and IV */
};
#define POHMELFS_IO_PERM_READ (1<<0)
#define POHMELFS_IO_PERM_WRITE (1<<1)
/*
* Configuration command used to create table of different remote servers.
*/
struct pohmelfs_ctl {
__u32 idx; /* Config index */
__u32 type; /* Socket type */
__u32 proto; /* Socket protocol */
__u16 addrlen; /* Size of the address */
__u16 perm; /* IO permission */
__u16 prio; /* IO priority */
struct saddr addr; /* Remote server address */
};
/*
* Ack for userspace about requested command.
*/
struct pohmelfs_cn_ack {
struct cn_msg msg;
int error;
int msg_num;
int unused[3];
struct pohmelfs_ctl ctl;
};
/*
* Inode info structure used to sync with server.
* Check what stat() returns.
*/
struct netfs_inode_info {
unsigned int mode;
unsigned int nlink;
unsigned int uid;
unsigned int gid;
unsigned int blocksize;
unsigned int padding;
__u64 ino;
__u64 blocks;
__u64 rdev;
__u64 size;
__u64 version;
};
static inline void netfs_convert_inode_info(struct netfs_inode_info *info)
{
info->mode = __cpu_to_be32(info->mode);
info->nlink = __cpu_to_be32(info->nlink);
info->uid = __cpu_to_be32(info->uid);
info->gid = __cpu_to_be32(info->gid);
info->blocksize = __cpu_to_be32(info->blocksize);
info->blocks = __cpu_to_be64(info->blocks);
info->rdev = __cpu_to_be64(info->rdev);
info->size = __cpu_to_be64(info->size);
info->version = __cpu_to_be64(info->version);
info->ino = __cpu_to_be64(info->ino);
}
/*
* Cache state machine.
*/
enum {
NETFS_COMMAND_PENDING = 0, /* Command is being executed */
NETFS_INODE_REMOTE_SYNCED, /* Inode was synced to server */
NETFS_INODE_REMOTE_DIR_SYNCED, /* Inode (directory) was synced from the server */
NETFS_INODE_OWNED, /* Inode is owned by given host */
NETFS_INODE_NEED_FLUSH, /* Inode has to be flushed to the server */
};
/*
* POHMELFS capabilities: information about supported
* crypto operations (hash/cipher, modes, key sizes and so on),
* root information (used/available size, number of objects, permissions)
*/
enum pohmelfs_capabilities {
POHMELFS_CRYPTO_CAPABILITIES = 0,
POHMELFS_ROOT_CAPABILITIES,
};
/* Read-only mount */
#define POHMELFS_FLAGS_RO (1<<0)
/* Extended attributes support on/off */
#define POHMELFS_FLAGS_XATTR (1<<1)
struct netfs_root_capabilities {
__u64 nr_files;
__u64 used, avail;
__u64 flags;
};
static inline void netfs_convert_root_capabilities(struct netfs_root_capabilities *cap)
{
cap->nr_files = __cpu_to_be64(cap->nr_files);
cap->used = __cpu_to_be64(cap->used);
cap->avail = __cpu_to_be64(cap->avail);
cap->flags = __cpu_to_be64(cap->flags);
}
struct netfs_crypto_capabilities {
unsigned short hash_strlen; /* Hash string length, e.g. "hmac(sha1)", including the terminating 0 byte */
unsigned short cipher_strlen; /* Cipher string length with the same format */
unsigned int cipher_keysize; /* Cipher key size */
};
static inline void netfs_convert_crypto_capabilities(struct netfs_crypto_capabilities *cap)
{
cap->hash_strlen = __cpu_to_be16(cap->hash_strlen);
cap->cipher_strlen = __cpu_to_be16(cap->cipher_strlen);
cap->cipher_keysize = __cpu_to_be32(cap->cipher_keysize);
}
enum pohmelfs_lock_type {
POHMELFS_LOCK_GRAB = (1<<15),
POHMELFS_READ_LOCK = 0,
POHMELFS_WRITE_LOCK,
};
struct netfs_lock {
__u64 start;
__u64 ino;
__u32 size;
__u32 type;
};
static inline void netfs_convert_lock(struct netfs_lock *lock)
{
lock->start = __cpu_to_be64(lock->start);
lock->ino = __cpu_to_be64(lock->ino);
lock->size = __cpu_to_be32(lock->size);
lock->type = __cpu_to_be32(lock->type);
}
#ifdef __KERNEL__
#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/rbtree.h>
#include <linux/net.h>
#include <linux/poll.h>
/*
* Private POHMELFS cache of objects in directory.
*/
struct pohmelfs_name {
struct rb_node hash_node;
struct list_head sync_create_entry;
u64 ino;
u32 hash;
u32 mode;
u32 len;
char *data;
};
/*
* POHMELFS inode. Main object.
*/
struct pohmelfs_inode {
struct list_head inode_entry; /* Entry in superblock list.
* Objects which are not bound to a dentry have to be dropped
* in ->put_super()
*/
struct rb_root hash_root; /* The same, but indexed by name hash and len */
struct mutex offset_lock; /* Protect both above trees */
struct list_head sync_create_list; /* List of created but not yet synced to the server children */
unsigned int drop_count;
int lock_type; /* How this inode is locked: read or write */
int error; /* Transaction error for given inode */
long state; /* State machine above */
u64 ino; /* Inode number */
u64 total_len; /* Total length of all children names, used to create offsets */
struct inode vfs_inode;
};
struct netfs_trans;
typedef int (*netfs_trans_complete_t)(struct page **pages, unsigned int page_num,
void *private, int err);
struct netfs_state;
struct pohmelfs_sb;
struct netfs_trans {
/*
* Transaction header and attached contiguous data live here.
*/
struct iovec iovec;
/*
* Pages attached to transaction.
*/
struct page **pages;
/*
* List and protecting lock for transaction destination
* network states.
*/
spinlock_t dst_lock;
struct list_head dst_list;
/*
* Number of users for given transaction.
* For example each network state attached to transaction
* via dst_list increases it.
*/
atomic_t refcnt;
/*
* Number of pages attached to given transaction.
* Some slots in above page array can be NULL, since
* for example page can be under writeback already,
* so we skip it in this transaction.
*/
unsigned int page_num;
/*
* Transaction flags: single dst or broadcast and so on.
*/
unsigned int flags;
/*
* Size of the data, which can be placed into
* iovec.iov_base area.
*/
unsigned int total_size;
/*
* Number of pages to be sent to the remote server.
* Usually equal to page_num above, but in the case of partial
* writeback it may include only those pages which have already
* completed their previous writeback.
*/
unsigned int attached_pages;
/*
* Attached number of bytes in all above pages.
*/
unsigned int attached_size;
/*
* Unique transaction generation number.
* Used as identity in the network state tree of transactions.
*/
unsigned int gen;
/*
* Transaction completion status.
*/
int result;
/*
* Superblock this transaction belongs to
*/
struct pohmelfs_sb *psb;
/*
* Crypto engine, which processed this transaction.
* Can be not NULL only if crypto engine holds encrypted pages.
*/
struct pohmelfs_crypto_engine *eng;
/* Private data */
void *private;
/* Completion callback, invoked just before transaction is destroyed */
netfs_trans_complete_t complete;
};
static inline int netfs_trans_cur_len(struct netfs_trans *t)
{
return (signed)(t->total_size - t->iovec.iov_len);
}
static inline void *netfs_trans_current(struct netfs_trans *t)
{
return t->iovec.iov_base + t->iovec.iov_len;
}
struct netfs_trans *netfs_trans_alloc(struct pohmelfs_sb *psb, unsigned int size,
unsigned int flags, unsigned int nr);
void netfs_trans_free(struct netfs_trans *t);
int netfs_trans_finish(struct netfs_trans *t, struct pohmelfs_sb *psb);
int netfs_trans_finish_send(struct netfs_trans *t, struct pohmelfs_sb *psb);
static inline void netfs_trans_reset(struct netfs_trans *t)
{
t->complete = NULL;
}
struct netfs_trans_dst {
struct list_head trans_entry;
struct rb_node state_entry;
unsigned long send_time;
/*
* Times this transaction was resent to its old or new,
* depending on flags, destinations. When it reaches maximum
* allowed number, specified in superblock->trans_retries,
* transaction will be freed with ETIMEDOUT error.
*/
unsigned int retries;
struct netfs_trans *trans;
struct netfs_state *state;
};
struct netfs_trans_dst *netfs_trans_search(struct netfs_state *st, unsigned int gen);
void netfs_trans_drop_dst(struct netfs_trans_dst *dst);
void netfs_trans_drop_dst_nostate(struct netfs_trans_dst *dst);
void netfs_trans_drop_trans(struct netfs_trans *t, struct netfs_state *st);
void netfs_trans_drop_last(struct netfs_trans *t, struct netfs_state *st);
int netfs_trans_resend(struct netfs_trans *t, struct pohmelfs_sb *psb);
int netfs_trans_remove_nolock(struct netfs_trans_dst *dst, struct netfs_state *st);
int netfs_trans_init(void);
void netfs_trans_exit(void);
struct pohmelfs_crypto_engine {
u64 iv; /* Crypto IV for current operation */
unsigned long timeout; /* Crypto waiting timeout */
unsigned int size; /* Size of crypto scratchpad */
void *data; /* Temporary crypto scratchpad */
/*
* Crypto operations performed on objects.
*/
struct crypto_hash *hash;
struct crypto_ablkcipher *cipher;
struct pohmelfs_crypto_thread *thread; /* Crypto thread which hosts this engine */
struct page **pages;
unsigned int page_num;
};
struct pohmelfs_crypto_thread {
struct list_head thread_entry;
struct task_struct *thread;
struct pohmelfs_sb *psb;
struct pohmelfs_crypto_engine eng;
struct netfs_trans *trans;
wait_queue_head_t wait;
int error;
unsigned int size;
struct page *page;
};
void pohmelfs_crypto_thread_make_ready(struct pohmelfs_crypto_thread *th);
/*
* Network state, attached to one server.
*/
struct netfs_state {
struct mutex __state_lock; /* Can not allow to use the same socket simultaneously */
struct mutex __state_send_lock;
struct netfs_cmd cmd; /* Cached command */
struct netfs_inode_info info; /* Cached inode info */
void *data; /* Cached some data */
unsigned int size; /* Size of that data */
struct pohmelfs_sb *psb; /* Superblock */
struct task_struct *thread; /* Async receiving thread */
/* Waiting/polling machinery */
wait_queue_t wait;
wait_queue_head_t *whead;
wait_queue_head_t thread_wait;
struct mutex trans_lock;
struct rb_root trans_root;
struct pohmelfs_ctl ctl; /* Remote peer */
struct socket *socket; /* Socket object */
struct socket *read_socket; /* Cached pointer to socket object.
* Used to determine if between lock drops socket was changed.
* Never used to read data or any kind of access.
*/
/*
* Crypto engines to process incoming data.
*/
struct pohmelfs_crypto_engine eng;
int need_reset;
};
int netfs_state_init(struct netfs_state *st);
void netfs_state_exit(struct netfs_state *st);
static inline void netfs_state_lock_send(struct netfs_state *st)
{
mutex_lock(&st->__state_send_lock);
}
static inline int netfs_state_trylock_send(struct netfs_state *st)
{
return mutex_trylock(&st->__state_send_lock);
}
static inline void netfs_state_unlock_send(struct netfs_state *st)
{
BUG_ON(!mutex_is_locked(&st->__state_send_lock));
mutex_unlock(&st->__state_send_lock);
}
static inline void netfs_state_lock(struct netfs_state *st)
{
mutex_lock(&st->__state_lock);
}
static inline void netfs_state_unlock(struct netfs_state *st)
{
BUG_ON(!mutex_is_locked(&st->__state_lock));
mutex_unlock(&st->__state_lock);
}
static inline unsigned int netfs_state_poll(struct netfs_state *st)
{
unsigned int revents = POLLHUP | POLLERR;
netfs_state_lock(st);
if (st->socket)
revents = st->socket->ops->poll(NULL, st->socket, NULL);
netfs_state_unlock(st);
return revents;
}
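/*
 * Illustrative sketch, not part of pohmelfs: one way netfs_state_poll()
 * could be used to test whether a server connection is still usable.
 * The helper name pohmelfs_state_is_alive() is hypothetical.
 */
static inline int pohmelfs_state_is_alive(struct netfs_state *st)
{
	unsigned int revents = netfs_state_poll(st);

	/* POLLHUP/POLLERR are also returned when there is no socket at all. */
	return !(revents & (POLLHUP | POLLERR));
}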
struct pohmelfs_config;
struct pohmelfs_sb {
struct rb_root mcache_root;
struct mutex mcache_lock;
atomic_long_t mcache_gen;
unsigned long mcache_timeout;
unsigned int idx;
unsigned int trans_retries;
atomic_t trans_gen;
unsigned int crypto_attached_size;
unsigned int crypto_align_size;
unsigned int crypto_fail_unsupported;
unsigned int crypto_thread_num;
struct list_head crypto_active_list, crypto_ready_list;
struct mutex crypto_thread_lock;
unsigned int trans_max_pages;
unsigned long trans_data_size;
unsigned long trans_timeout;
unsigned long drop_scan_timeout;
unsigned long trans_scan_timeout;
unsigned long wait_on_page_timeout;
struct list_head flush_list;
struct list_head drop_list;
spinlock_t ino_lock;
u64 ino;
/*
 * Remote nodes POHMELFS is connected to.
*/
struct list_head state_list;
struct mutex state_lock;
/*
* Currently active state to request data from.
*/
struct pohmelfs_config *active_state;
wait_queue_head_t wait;
/*
* Timed checks: stale transactions, inodes to be freed and so on.
*/
struct delayed_work dwork;
struct delayed_work drop_dwork;
struct super_block *sb;
struct backing_dev_info bdi;
/*
* Algorithm strings.
*/
char *hash_string;
char *cipher_string;
u8 *hash_key;
u8 *cipher_key;
/*
* Algorithm string lengths.
*/
unsigned int hash_strlen;
unsigned int cipher_strlen;
unsigned int hash_keysize;
unsigned int cipher_keysize;
/*
 * Controls whether to perform crypto processing or not.
*/
int perform_crypto;
/*
* POHMELFS statistics.
*/
u64 total_size;
u64 avail_size;
atomic_long_t total_inodes;
/*
* Xattr support, read-only and so on.
*/
u64 state_flags;
/*
* Temporary storage to detect changes in the wait queue.
*/
long flags;
};
static inline void netfs_trans_update(struct netfs_cmd *cmd,
struct netfs_trans *t, unsigned int size)
{
unsigned int sz = ALIGN(size, t->psb->crypto_align_size);
t->iovec.iov_len += sizeof(struct netfs_cmd) + sz;
cmd->cpad = __cpu_to_be16(sz - size);
}
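/*
 * Illustrative sketch, not part of pohmelfs: the padding math performed by
 * netfs_trans_update() above, spelled out. Assuming crypto_align_size is,
 * say, 16 and the payload is 13 bytes, ALIGN(13, 16) == 16, so the iovec
 * grows by sizeof(struct netfs_cmd) + 16 and cmd->cpad records 3 bytes of
 * crypto padding. The helper name below is hypothetical.
 */
static inline unsigned int pohmelfs_crypto_pad_example(unsigned int size,
		unsigned int align)
{
	/* align must be a power of two, as required by ALIGN(). */
	return ALIGN(size, align) - size;
}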
static inline struct pohmelfs_sb *POHMELFS_SB(struct super_block *sb)
{
return sb->s_fs_info;
}
static inline struct pohmelfs_inode *POHMELFS_I(struct inode *inode)
{
return container_of(inode, struct pohmelfs_inode, vfs_inode);
}
static inline u64 pohmelfs_new_ino(struct pohmelfs_sb *psb)
{
u64 ino;
spin_lock(&psb->ino_lock);
ino = psb->ino++;
spin_unlock(&psb->ino_lock);
return ino;
}
static inline void pohmelfs_put_inode(struct pohmelfs_inode *pi)
{
struct pohmelfs_sb *psb = POHMELFS_SB(pi->vfs_inode.i_sb);
spin_lock(&psb->ino_lock);
list_move_tail(&pi->inode_entry, &psb->drop_list);
pi->drop_count++;
spin_unlock(&psb->ino_lock);
}
struct pohmelfs_config {
struct list_head config_entry;
struct netfs_state state;
};
struct pohmelfs_config_group {
/*
* Entry in the global config group list.
*/
struct list_head group_entry;
/*
* Index of the current group.
*/
unsigned int idx;
/*
* Number of config_list entries in this group entry.
*/
unsigned int num_entry;
/*
* Algorithm strings.
*/
char *hash_string;
char *cipher_string;
/*
* Algorithm string lengths.
*/
unsigned int hash_strlen;
unsigned int cipher_strlen;
/*
* Key and its size.
*/
unsigned int hash_keysize;
unsigned int cipher_keysize;
u8 *hash_key;
u8 *cipher_key;
/*
* List of config entries (network state info) for given idx.
*/
struct list_head config_list;
};
int __init pohmelfs_config_init(void);
void pohmelfs_config_exit(void);
int pohmelfs_copy_config(struct pohmelfs_sb *psb);
int pohmelfs_copy_crypto(struct pohmelfs_sb *psb);
int pohmelfs_config_check(struct pohmelfs_config *config, int idx);
int pohmelfs_state_init_one(struct pohmelfs_sb *psb, struct pohmelfs_config *conf);
extern const struct file_operations pohmelfs_dir_fops;
extern const struct inode_operations pohmelfs_dir_inode_ops;
int pohmelfs_state_init(struct pohmelfs_sb *psb);
void pohmelfs_state_exit(struct pohmelfs_sb *psb);
void pohmelfs_state_flush_transactions(struct netfs_state *st);
void pohmelfs_fill_inode(struct inode *inode, struct netfs_inode_info *info);
void pohmelfs_name_del(struct pohmelfs_inode *parent, struct pohmelfs_name *n);
void pohmelfs_free_names(struct pohmelfs_inode *parent);
struct pohmelfs_name *pohmelfs_search_hash(struct pohmelfs_inode *pi, u32 hash);
void pohmelfs_inode_del_inode(struct pohmelfs_sb *psb, struct pohmelfs_inode *pi);
struct pohmelfs_inode *pohmelfs_create_entry_local(struct pohmelfs_sb *psb,
struct pohmelfs_inode *parent, struct qstr *str, u64 start, umode_t mode);
int pohmelfs_write_create_inode(struct pohmelfs_inode *pi);
int pohmelfs_write_inode_create(struct inode *inode, struct netfs_trans *trans);
int pohmelfs_remove_child(struct pohmelfs_inode *parent, struct pohmelfs_name *n);
struct pohmelfs_inode *pohmelfs_new_inode(struct pohmelfs_sb *psb,
struct pohmelfs_inode *parent, struct qstr *str,
struct netfs_inode_info *info, int link);
int pohmelfs_setattr(struct dentry *dentry, struct iattr *attr);
int pohmelfs_setattr_raw(struct inode *inode, struct iattr *attr);
int pohmelfs_meta_command(struct pohmelfs_inode *pi, unsigned int cmd_op, unsigned int flags,
netfs_trans_complete_t complete, void *priv, u64 start);
int pohmelfs_meta_command_data(struct pohmelfs_inode *pi, u64 id, unsigned int cmd_op, char *addon,
unsigned int flags, netfs_trans_complete_t complete, void *priv, u64 start);
void pohmelfs_check_states(struct pohmelfs_sb *psb);
void pohmelfs_switch_active(struct pohmelfs_sb *psb);
int pohmelfs_construct_path_string(struct pohmelfs_inode *pi, void *data, int len);
int pohmelfs_path_length(struct pohmelfs_inode *pi);
struct pohmelfs_crypto_completion {
struct completion complete;
int error;
};
int pohmelfs_trans_crypt(struct netfs_trans *t, struct pohmelfs_sb *psb);
void pohmelfs_crypto_exit(struct pohmelfs_sb *psb);
int pohmelfs_crypto_init(struct pohmelfs_sb *psb);
int pohmelfs_crypto_engine_init(struct pohmelfs_crypto_engine *e, struct pohmelfs_sb *psb);
void pohmelfs_crypto_engine_exit(struct pohmelfs_crypto_engine *e);
int pohmelfs_crypto_process_input_data(struct pohmelfs_crypto_engine *e, u64 iv,
void *data, struct page *page, unsigned int size);
int pohmelfs_crypto_process_input_page(struct pohmelfs_crypto_engine *e,
struct page *page, unsigned int size, u64 iv);
static inline u64 pohmelfs_gen_iv(struct netfs_trans *t)
{
u64 iv = t->gen;
iv <<= 32;
iv |= ((unsigned long)t) & 0xffffffff;
return iv;
}
int pohmelfs_data_lock(struct pohmelfs_inode *pi, u64 start, u32 size, int type);
int pohmelfs_data_unlock(struct pohmelfs_inode *pi, u64 start, u32 size, int type);
int pohmelfs_data_lock_response(struct netfs_state *st);
static inline int pohmelfs_need_lock(struct pohmelfs_inode *pi, int type)
{
if (test_bit(NETFS_INODE_OWNED, &pi->state)) {
if (type == pi->lock_type)
return 0;
if ((type == POHMELFS_READ_LOCK) && (pi->lock_type == POHMELFS_WRITE_LOCK))
return 0;
}
if (!test_bit(NETFS_INODE_REMOTE_SYNCED, &pi->state))
return 0;
return 1;
}
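/*
 * Illustrative sketch, not part of pohmelfs: the usual way
 * pohmelfs_need_lock() pairs with pohmelfs_data_lock(). The wrapper name
 * pohmelfs_lock_if_needed() is hypothetical.
 */
static inline int pohmelfs_lock_if_needed(struct pohmelfs_inode *pi,
		u64 start, u32 size, int type)
{
	/* Already own a compatible lock, or the inode was never synced remotely. */
	if (!pohmelfs_need_lock(pi, type))
		return 0;

	/* Ask the server for a lock covering [start, start + size). */
	return pohmelfs_data_lock(pi, start, size, type);
}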
int __init pohmelfs_mcache_init(void);
void pohmelfs_mcache_exit(void);
/* #define CONFIG_POHMELFS_DEBUG */
#ifdef CONFIG_POHMELFS_DEBUG
#define dprintka(f, a...) printk(f, ##a)
#define dprintk(f, a...) printk("%d: " f, task_pid_vnr(current), ##a)
#else
#define dprintka(f, a...) do {} while (0)
#define dprintk(f, a...) do {} while (0)
#endif
static inline void netfs_trans_get(struct netfs_trans *t)
{
atomic_inc(&t->refcnt);
}
static inline void netfs_trans_put(struct netfs_trans *t)
{
if (atomic_dec_and_test(&t->refcnt)) {
dprintk("%s: t: %p, gen: %u, err: %d.\n",
__func__, t, t->gen, t->result);
if (t->complete)
t->complete(t->pages, t->page_num,
t->private, t->result);
netfs_trans_free(t);
}
}
struct pohmelfs_mcache {
struct rb_node mcache_entry;
struct completion complete;
atomic_t refcnt;
u64 gen;
void *data;
u64 start;
u32 size;
int err;
struct netfs_inode_info info;
};
struct pohmelfs_mcache *pohmelfs_mcache_alloc(struct pohmelfs_sb *psb, u64 start,
unsigned int size, void *data);
void pohmelfs_mcache_free(struct pohmelfs_sb *psb, struct pohmelfs_mcache *m);
struct pohmelfs_mcache *pohmelfs_mcache_search(struct pohmelfs_sb *psb, u64 gen);
void pohmelfs_mcache_remove_locked(struct pohmelfs_sb *psb, struct pohmelfs_mcache *m);
static inline void pohmelfs_mcache_get(struct pohmelfs_mcache *m)
{
atomic_inc(&m->refcnt);
}
static inline void pohmelfs_mcache_put(struct pohmelfs_sb *psb,
struct pohmelfs_mcache *m)
{
if (atomic_dec_and_test(&m->refcnt))
pohmelfs_mcache_free(psb, m);
}
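/*
 * Illustrative sketch, not part of pohmelfs: the intended mcache lifecycle.
 * A request is keyed by m->gen, the reply path finds the entry via
 * pohmelfs_mcache_search() and completes it, and the waiter drops its
 * reference with pohmelfs_mcache_put(). The function below is a
 * hypothetical waiter; it assumes psb->mcache_timeout is in jiffies.
 */
static inline int pohmelfs_mcache_wait_example(struct pohmelfs_sb *psb,
		struct pohmelfs_mcache *m)
{
	int err = 0;

	if (!wait_for_completion_timeout(&m->complete, psb->mcache_timeout))
		err = -ETIMEDOUT;
	else if (m->err)
		err = m->err;

	/* Drop the waiter's reference; the entry is freed on the last put. */
	pohmelfs_mcache_put(psb, m);
	return err;
}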
/* #define POHMELFS_TRUNCATE_ON_INODE_FLUSH */
#endif /* __KERNEL__*/
#endif /* __NETFS_H */

View File

@ -1,120 +0,0 @@
/*
* 2007+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/ktime.h>
#include <linux/fs_struct.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/mount.h>
#include <linux/mm.h>
#include "netfs.h"
#define UNHASHED_OBSCURE_STRING_SIZE sizeof(" (deleted)")
/*
 * Create a path from the root for the given inode.
 * The path is formed as a set of structures containing the name of the
 * object and its inode data (mode, permissions and so on).
*/
int pohmelfs_construct_path_string(struct pohmelfs_inode *pi, void *data, int len)
{
struct path path;
struct dentry *d;
char *ptr;
int err = 0, strlen, reduce = 0;
d = d_find_alias(&pi->vfs_inode);
if (!d) {
printk("%s: no alias, list_empty: %d.\n", __func__, list_empty(&pi->vfs_inode.i_dentry));
return -ENOENT;
}
spin_lock(&current->fs->lock);
path.mnt = mntget(current->fs->root.mnt);
spin_unlock(&current->fs->lock);
path.dentry = d;
if (!IS_ROOT(d) && d_unhashed(d))
reduce = 1;
ptr = d_path(&path, data, len);
if (IS_ERR(ptr)) {
err = PTR_ERR(ptr);
goto out;
}
if (reduce && len >= UNHASHED_OBSCURE_STRING_SIZE) {
char *end = data + len - UNHASHED_OBSCURE_STRING_SIZE;
*end = '\0';
}
strlen = len - (ptr - (char *)data);
memmove(data, ptr, strlen);
ptr = data;
err = strlen;
dprintk("%s: dname: '%s', len: %u, maxlen: %u, name: '%s', strlen: %d.\n",
__func__, d->d_name.name, d->d_name.len, len, ptr, strlen);
out:
dput(d);
mntput(path.mnt);
return err;
}
int pohmelfs_path_length(struct pohmelfs_inode *pi)
{
struct dentry *d, *root, *first;
int len;
unsigned seq;
first = d_find_alias(&pi->vfs_inode);
if (!first) {
dprintk("%s: ino: %llu, mode: %o.\n", __func__, pi->ino, pi->vfs_inode.i_mode);
return -ENOENT;
}
spin_lock(&current->fs->lock);
root = dget(current->fs->root.dentry);
spin_unlock(&current->fs->lock);
rename_retry:
len = 1; /* Root slash */
d = first;
seq = read_seqbegin(&rename_lock);
rcu_read_lock();
if (!IS_ROOT(d) && d_unhashed(d))
len += UNHASHED_OBSCURE_STRING_SIZE; /* Obscure " (deleted)" string */
while (d && d != root && !IS_ROOT(d)) {
len += d->d_name.len + 1; /* Plus slash */
d = d->d_parent;
}
rcu_read_unlock();
if (read_seqretry(&rename_lock, seq))
goto rename_retry;
dput(root);
dput(first);
return len + 1; /* Including zero-byte */
}
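/*
 * Illustrative sketch, not part of pohmelfs: the two helpers above are meant
 * to be used as a pair: size the buffer first, then render the path into it.
 * The wrapper name is hypothetical and assumes <linux/slab.h> and
 * <linux/err.h> are available.
 */
static char *pohmelfs_alloc_path_example(struct pohmelfs_inode *pi, int *lenp)
{
	char *path;
	int len, err;

	len = pohmelfs_path_length(pi);
	if (len < 0)
		return ERR_PTR(len);

	path = kmalloc(len, GFP_KERNEL);
	if (!path)
		return ERR_PTR(-ENOMEM);

	err = pohmelfs_construct_path_string(pi, path, len);
	if (err < 0) {
		kfree(path);
		return ERR_PTR(err);
	}

	*lenp = err; /* Length of the path string placed at the start of the buffer. */
	return path;
}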

View File

@ -1,706 +0,0 @@
/*
* 2007+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/fs.h>
#include <linux/jhash.h>
#include <linux/hash.h>
#include <linux/ktime.h>
#include <linux/mempool.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/parser.h>
#include <linux/poll.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/statfs.h>
#include <linux/writeback.h>
#include "netfs.h"
static struct kmem_cache *netfs_trans_dst;
static mempool_t *netfs_trans_dst_pool;
static void netfs_trans_init_static(struct netfs_trans *t, int num, int size)
{
t->page_num = num;
t->total_size = size;
atomic_set(&t->refcnt, 1);
spin_lock_init(&t->dst_lock);
INIT_LIST_HEAD(&t->dst_list);
}
static int netfs_trans_send_pages(struct netfs_trans *t, struct netfs_state *st)
{
int err = 0;
unsigned int i, attached_pages = t->attached_pages, ci;
struct msghdr msg;
struct page **pages = (t->eng) ? t->eng->pages : t->pages;
struct page *p;
unsigned int size;
msg.msg_name = NULL;
msg.msg_namelen = 0;
msg.msg_control = NULL;
msg.msg_controllen = 0;
msg.msg_flags = MSG_WAITALL | MSG_MORE;
ci = 0;
for (i = 0; i < t->page_num; ++i) {
struct page *page = pages[ci];
struct netfs_cmd cmd;
struct iovec io;
p = t->pages[i];
if (!p)
continue;
size = page_private(p);
io.iov_base = &cmd;
io.iov_len = sizeof(struct netfs_cmd);
cmd.cmd = NETFS_WRITE_PAGE;
cmd.ext = 0;
cmd.id = 0;
cmd.size = size;
cmd.start = p->index;
cmd.start <<= PAGE_CACHE_SHIFT;
cmd.csize = 0;
cmd.cpad = 0;
cmd.iv = pohmelfs_gen_iv(t);
netfs_convert_cmd(&cmd);
msg.msg_iov = &io;
msg.msg_iovlen = 1;
msg.msg_flags = MSG_WAITALL | MSG_MORE;
err = kernel_sendmsg(st->socket, &msg, (struct kvec *)msg.msg_iov, 1, sizeof(struct netfs_cmd));
if (err <= 0) {
printk("%s: %d/%d failed to send transaction header: t: %p, gen: %u, err: %d.\n",
__func__, i, t->page_num, t, t->gen, err);
if (err == 0)
err = -ECONNRESET;
goto err_out;
}
msg.msg_flags = MSG_WAITALL | (attached_pages == 1 ? 0 :
MSG_MORE);
err = kernel_sendpage(st->socket, page, 0, size, msg.msg_flags);
if (err <= 0) {
printk("%s: %d/%d failed to send transaction page: t: %p, gen: %u, size: %u, err: %d.\n",
__func__, i, t->page_num, t, t->gen, size, err);
if (err == 0)
err = -ECONNRESET;
goto err_out;
}
dprintk("%s: %d/%d sent t: %p, gen: %u, page: %p/%p, size: %u.\n",
__func__, i, t->page_num, t, t->gen, page, p, size);
err = 0;
attached_pages--;
if (!attached_pages)
break;
ci++;
continue;
err_out:
printk("%s: t: %p, gen: %u, err: %d.\n", __func__, t, t->gen, err);
netfs_state_exit(st);
break;
}
return err;
}
int netfs_trans_send(struct netfs_trans *t, struct netfs_state *st)
{
int err;
struct msghdr msg;
BUG_ON(!t->iovec.iov_len);
BUG_ON(t->iovec.iov_len > 1024*1024*1024);
netfs_state_lock_send(st);
if (!st->socket) {
err = netfs_state_init(st);
if (err)
goto err_out_unlock_return;
}
msg.msg_iov = &t->iovec;
msg.msg_iovlen = 1;
msg.msg_name = NULL;
msg.msg_namelen = 0;
msg.msg_control = NULL;
msg.msg_controllen = 0;
msg.msg_flags = MSG_WAITALL;
if (t->attached_pages)
msg.msg_flags |= MSG_MORE;
err = kernel_sendmsg(st->socket, &msg, (struct kvec *)msg.msg_iov, 1, t->iovec.iov_len);
if (err <= 0) {
printk("%s: failed to send contig transaction: t: %p, gen: %u, size: %zu, err: %d.\n",
__func__, t, t->gen, t->iovec.iov_len, err);
if (err == 0)
err = -ECONNRESET;
goto err_out_unlock_return;
}
dprintk("%s: sent %s transaction: t: %p, gen: %u, size: %zu, page_num: %u.\n",
__func__, (t->page_num) ? "partial" : "full",
t, t->gen, t->iovec.iov_len, t->page_num);
err = 0;
if (t->attached_pages)
err = netfs_trans_send_pages(t, st);
err_out_unlock_return:
if (st->need_reset)
netfs_state_exit(st);
netfs_state_unlock_send(st);
dprintk("%s: t: %p, gen: %u, err: %d.\n",
__func__, t, t->gen, err);
t->result = err;
return err;
}
static inline int netfs_trans_cmp(unsigned int gen, unsigned int new)
{
if (gen < new)
return 1;
if (gen > new)
return -1;
return 0;
}
struct netfs_trans_dst *netfs_trans_search(struct netfs_state *st, unsigned int gen)
{
struct rb_root *root = &st->trans_root;
struct rb_node *n = root->rb_node;
struct netfs_trans_dst *tmp, *ret = NULL;
struct netfs_trans *t;
int cmp;
while (n) {
tmp = rb_entry(n, struct netfs_trans_dst, state_entry);
t = tmp->trans;
cmp = netfs_trans_cmp(t->gen, gen);
if (cmp < 0)
n = n->rb_left;
else if (cmp > 0)
n = n->rb_right;
else {
ret = tmp;
break;
}
}
return ret;
}
static int netfs_trans_insert(struct netfs_trans_dst *ndst, struct netfs_state *st)
{
struct rb_root *root = &st->trans_root;
struct rb_node **n = &root->rb_node, *parent = NULL;
struct netfs_trans_dst *ret = NULL, *tmp;
struct netfs_trans *t = NULL, *new = ndst->trans;
int cmp;
while (*n) {
parent = *n;
tmp = rb_entry(parent, struct netfs_trans_dst, state_entry);
t = tmp->trans;
cmp = netfs_trans_cmp(t->gen, new->gen);
if (cmp < 0)
n = &parent->rb_left;
else if (cmp > 0)
n = &parent->rb_right;
else {
ret = tmp;
break;
}
}
if (ret) {
printk("%s: exist: old: gen: %u, flags: %x, send_time: %lu, "
"new: gen: %u, flags: %x, send_time: %lu.\n",
__func__, t->gen, t->flags, ret->send_time,
new->gen, new->flags, ndst->send_time);
return -EEXIST;
}
rb_link_node(&ndst->state_entry, parent, n);
rb_insert_color(&ndst->state_entry, root);
ndst->send_time = jiffies;
return 0;
}
int netfs_trans_remove_nolock(struct netfs_trans_dst *dst, struct netfs_state *st)
{
if (dst && dst->state_entry.rb_parent_color) {
rb_erase(&dst->state_entry, &st->trans_root);
dst->state_entry.rb_parent_color = 0;
return 1;
}
return 0;
}
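/*
 * Illustrative sketch, not part of pohmelfs: how a reply handler could use
 * netfs_trans_search() and netfs_trans_remove_nolock() once a server
 * acknowledges a given generation number. The function name is hypothetical.
 */
static int pohmelfs_trans_ack_example(struct netfs_state *st,
		unsigned int gen, int err)
{
	struct netfs_trans_dst *dst;

	mutex_lock(&st->trans_lock);
	dst = netfs_trans_search(st, gen);
	if (dst) {
		netfs_trans_remove_nolock(dst, st);
		dst->trans->result = err;
	}
	mutex_unlock(&st->trans_lock);

	if (!dst)
		return -ENOENT;

	/* Completes the transaction when the last reference is dropped. */
	netfs_trans_drop_dst_nostate(dst);
	return 0;
}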
static int netfs_trans_remove_state(struct netfs_trans_dst *dst)
{
int ret;
struct netfs_state *st = dst->state;
mutex_lock(&st->trans_lock);
ret = netfs_trans_remove_nolock(dst, st);
mutex_unlock(&st->trans_lock);
return ret;
}
/*
 * Create a new destination for the given transaction, associated with the
 * given network state. The transaction's reference counter is bumped; it is
 * dropped either when a reply is received or when the async timeout
 * detection task fails to resend and drops the transaction.
*/
static int netfs_trans_push_dst(struct netfs_trans *t, struct netfs_state *st)
{
struct netfs_trans_dst *dst;
int err;
dst = mempool_alloc(netfs_trans_dst_pool, GFP_KERNEL);
if (!dst)
return -ENOMEM;
dst->retries = 0;
dst->send_time = 0;
dst->state = st;
dst->trans = t;
netfs_trans_get(t);
mutex_lock(&st->trans_lock);
err = netfs_trans_insert(dst, st);
mutex_unlock(&st->trans_lock);
if (err)
goto err_out_free;
spin_lock(&t->dst_lock);
list_add_tail(&dst->trans_entry, &t->dst_list);
spin_unlock(&t->dst_lock);
return 0;
err_out_free:
t->result = err;
netfs_trans_put(t);
mempool_free(dst, netfs_trans_dst_pool);
return err;
}
static void netfs_trans_free_dst(struct netfs_trans_dst *dst)
{
netfs_trans_put(dst->trans);
mempool_free(dst, netfs_trans_dst_pool);
}
static void netfs_trans_remove_dst(struct netfs_trans_dst *dst)
{
if (netfs_trans_remove_state(dst))
netfs_trans_free_dst(dst);
}
/*
 * Drop a destination transaction entry that the caller already holds.
*/
void netfs_trans_drop_dst(struct netfs_trans_dst *dst)
{
struct netfs_trans *t = dst->trans;
spin_lock(&t->dst_lock);
list_del_init(&dst->trans_entry);
spin_unlock(&t->dst_lock);
netfs_trans_remove_dst(dst);
}
/*
 * Drop a destination transaction entry that the caller already holds and
 * that has already been removed from the state tree.
*/
void netfs_trans_drop_dst_nostate(struct netfs_trans_dst *dst)
{
struct netfs_trans *t = dst->trans;
spin_lock(&t->dst_lock);
list_del_init(&dst->trans_entry);
spin_unlock(&t->dst_lock);
netfs_trans_free_dst(dst);
}
/*
 * This drops a destination transaction entry from the appropriate network
 * state tree and drops the related reference counter. It is possible that
 * the transaction will be freed here if its reference counter hits zero.
 * The destination transaction entry will be freed.
*/
void netfs_trans_drop_trans(struct netfs_trans *t, struct netfs_state *st)
{
struct netfs_trans_dst *dst, *tmp, *ret = NULL;
spin_lock(&t->dst_lock);
list_for_each_entry_safe(dst, tmp, &t->dst_list, trans_entry) {
if (dst->state == st) {
ret = dst;
list_del(&dst->trans_entry);
break;
}
}
spin_unlock(&t->dst_lock);
if (ret)
netfs_trans_remove_dst(ret);
}
/*
 * Drop the transaction's most recently added destination entry (or, if the
 * last entry does not belong to the given state, the entry that does) and
 * drop the related reference counter. The transaction may be freed here if
 * its counter hits zero. The destination entry itself is freed.
*/
void netfs_trans_drop_last(struct netfs_trans *t, struct netfs_state *st)
{
struct netfs_trans_dst *dst, *tmp, *ret;
spin_lock(&t->dst_lock);
ret = list_entry(t->dst_list.prev, struct netfs_trans_dst, trans_entry);
if (ret->state != st) {
ret = NULL;
list_for_each_entry_safe(dst, tmp, &t->dst_list, trans_entry) {
if (dst->state == st) {
ret = dst;
list_del_init(&dst->trans_entry);
break;
}
}
} else {
list_del(&ret->trans_entry);
}
spin_unlock(&t->dst_lock);
if (ret)
netfs_trans_remove_dst(ret);
}
static int netfs_trans_push(struct netfs_trans *t, struct netfs_state *st)
{
int err;
err = netfs_trans_push_dst(t, st);
if (err)
return err;
err = netfs_trans_send(t, st);
if (err)
goto err_out_free;
if (t->flags & NETFS_TRANS_SINGLE_DST)
pohmelfs_switch_active(st->psb);
return 0;
err_out_free:
t->result = err;
netfs_trans_drop_last(t, st);
return err;
}
int netfs_trans_finish_send(struct netfs_trans *t, struct pohmelfs_sb *psb)
{
struct pohmelfs_config *c;
int err = -ENODEV;
struct netfs_state *st;
#if 0
dprintk("%s: t: %p, gen: %u, size: %u, page_num: %u, active: %p.\n",
__func__, t, t->gen, t->iovec.iov_len, t->page_num, psb->active_state);
#endif
mutex_lock(&psb->state_lock);
list_for_each_entry(c, &psb->state_list, config_entry) {
st = &c->state;
if (t->flags & NETFS_TRANS_SINGLE_DST) {
if (!(st->ctl.perm & POHMELFS_IO_PERM_READ))
continue;
} else {
if (!(st->ctl.perm & POHMELFS_IO_PERM_WRITE))
continue;
}
if (psb->active_state && (psb->active_state->state.ctl.prio >= st->ctl.prio) &&
(t->flags & NETFS_TRANS_SINGLE_DST))
st = &psb->active_state->state;
err = netfs_trans_push(t, st);
if (!err && (t->flags & NETFS_TRANS_SINGLE_DST))
break;
}
mutex_unlock(&psb->state_lock);
#if 0
dprintk("%s: fully sent t: %p, gen: %u, size: %u, page_num: %u, err: %d.\n",
__func__, t, t->gen, t->iovec.iov_len, t->page_num, err);
#endif
if (err)
t->result = err;
return err;
}
int netfs_trans_finish(struct netfs_trans *t, struct pohmelfs_sb *psb)
{
int err;
struct netfs_cmd *cmd = t->iovec.iov_base;
t->gen = atomic_inc_return(&psb->trans_gen);
cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
cmd->cmd = NETFS_TRANS;
cmd->start = t->gen;
cmd->id = 0;
if (psb->perform_crypto) {
cmd->ext = psb->crypto_attached_size;
cmd->csize = psb->crypto_attached_size;
}
dprintk("%s: t: %u, size: %u, iov_len: %zu, attached_size: %u, attached_pages: %u.\n",
__func__, t->gen, cmd->size, t->iovec.iov_len, t->attached_size, t->attached_pages);
err = pohmelfs_trans_crypt(t, psb);
if (err) {
t->result = err;
netfs_convert_cmd(cmd);
dprintk("%s: trans: %llu, crypto_attached_size: %u, attached_size: %u, attached_pages: %d, trans_size: %u, err: %d.\n",
__func__, cmd->start, psb->crypto_attached_size, t->attached_size, t->attached_pages, cmd->size, err);
}
netfs_trans_put(t);
return err;
}
/*
* Resend transaction to remote server(s).
* If new servers were added into superblock, we can try to send data
* to them too.
*
 * It is called under the superblock's state_lock, so we can safely
 * dereference psb->state_list. Also, the transaction's reference counter
 * is bumped, so it cannot go away under us and we can safely access all
 * of its members. The state is locked.
*
* This function returns 0 if transaction was successfully sent to at
* least one destination target.
*/
int netfs_trans_resend(struct netfs_trans *t, struct pohmelfs_sb *psb)
{
struct netfs_trans_dst *dst;
struct netfs_state *st;
struct pohmelfs_config *c;
int err, exist, error = -ENODEV;
list_for_each_entry(c, &psb->state_list, config_entry) {
st = &c->state;
exist = 0;
spin_lock(&t->dst_lock);
list_for_each_entry(dst, &t->dst_list, trans_entry) {
if (st == dst->state) {
exist = 1;
break;
}
}
spin_unlock(&t->dst_lock);
if (exist) {
if (!(t->flags & NETFS_TRANS_SINGLE_DST) ||
(c->config_entry.next == &psb->state_list)) {
dprintk("%s: resending st: %p, t: %p, gen: %u.\n",
__func__, st, t, t->gen);
err = netfs_trans_send(t, st);
if (!err)
error = 0;
}
continue;
}
dprintk("%s: pushing/resending st: %p, t: %p, gen: %u.\n",
__func__, st, t, t->gen);
err = netfs_trans_push(t, st);
if (err)
continue;
error = 0;
if (t->flags & NETFS_TRANS_SINGLE_DST)
break;
}
t->result = error;
return error;
}
void *netfs_trans_add(struct netfs_trans *t, unsigned int size)
{
struct iovec *io = &t->iovec;
void *ptr;
if (size > t->total_size) {
ptr = ERR_PTR(-EINVAL);
goto out;
}
if (io->iov_len + size > t->total_size) {
dprintk("%s: too big size t: %p, gen: %u, iov_len: %zu, size: %u, total: %u.\n",
__func__, t, t->gen, io->iov_len, size, t->total_size);
ptr = ERR_PTR(-E2BIG);
goto out;
}
ptr = io->iov_base + io->iov_len;
io->iov_len += size;
out:
dprintk("%s: t: %p, gen: %u, size: %u, total: %zu.\n",
__func__, t, t->gen, size, io->iov_len);
return ptr;
}
void netfs_trans_free(struct netfs_trans *t)
{
if (t->eng)
pohmelfs_crypto_thread_make_ready(t->eng->thread);
kfree(t);
}
struct netfs_trans *netfs_trans_alloc(struct pohmelfs_sb *psb, unsigned int size,
unsigned int flags, unsigned int nr)
{
struct netfs_trans *t;
unsigned int num, cont, pad, size_no_trans;
unsigned int crypto_added = 0;
struct netfs_cmd *cmd;
if (psb->perform_crypto)
crypto_added = psb->crypto_attached_size;
/*
* |sizeof(struct netfs_trans)|
* |sizeof(struct netfs_cmd)| - transaction header
* |size| - buffer with requested size
* |padding| - crypto padding, zero bytes
* |nr * sizeof(struct page *)| - array of page pointers
*
* Overall size should be less than PAGE_SIZE for guaranteed allocation.
*/
cont = size;
size = ALIGN(size, psb->crypto_align_size);
pad = size - cont;
size_no_trans = size + sizeof(struct netfs_cmd) * 2 + crypto_added;
cont = sizeof(struct netfs_trans) + size_no_trans;
num = (PAGE_SIZE - cont)/sizeof(struct page *);
if (nr > num)
nr = num;
t = kzalloc(cont + nr*sizeof(struct page *), GFP_NOIO);
if (!t)
goto err_out_exit;
t->iovec.iov_base = (void *)(t + 1);
t->pages = (struct page **)(t->iovec.iov_base + size_no_trans);
/*
* Reserving space for transaction header.
*/
t->iovec.iov_len = sizeof(struct netfs_cmd) + crypto_added;
netfs_trans_init_static(t, nr, size_no_trans);
t->flags = flags;
t->psb = psb;
cmd = (struct netfs_cmd *)t->iovec.iov_base;
cmd->size = size;
cmd->cpad = pad;
cmd->csize = crypto_added;
dprintk("%s: t: %p, gen: %u, size: %u, padding: %u, align_size: %u, flags: %x, "
"page_num: %u, base: %p, pages: %p.\n",
__func__, t, t->gen, size, pad, psb->crypto_align_size, flags, nr,
t->iovec.iov_base, t->pages);
return t;
err_out_exit:
return NULL;
}
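/*
 * Illustrative sketch, not part of pohmelfs: the size budget enforced by
 * netfs_trans_alloc() above. The single GFP_NOIO allocation must hold the
 * transaction header, the aligned command/data area (size_no_trans) and the
 * page pointer array, so the page count is clamped as below. The helper
 * name is hypothetical and assumes size_no_trans plus the transaction
 * header still fits into PAGE_SIZE.
 */
static inline unsigned int netfs_trans_max_pages_example(unsigned int size_no_trans)
{
	return (PAGE_SIZE - sizeof(struct netfs_trans) - size_no_trans) /
		sizeof(struct page *);
}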
int netfs_trans_init(void)
{
int err = -ENOMEM;
netfs_trans_dst = kmem_cache_create("netfs_trans_dst", sizeof(struct netfs_trans_dst),
0, 0, NULL);
if (!netfs_trans_dst)
goto err_out_exit;
netfs_trans_dst_pool = mempool_create_slab_pool(256, netfs_trans_dst);
if (!netfs_trans_dst_pool)
goto err_out_free;
return 0;
err_out_free:
kmem_cache_destroy(netfs_trans_dst);
err_out_exit:
return err;
}
void netfs_trans_exit(void)
{
mempool_destroy(netfs_trans_dst_pool);
kmem_cache_destroy(netfs_trans_dst);
}