Merge git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging-2.6: (43 commits)
  staging: slicoss: update README
  otus/zdusb.c: additional USB identifier
  Staging: go7007: fix build issues
  Staging: sxg: Fix leaks and checksum errors in transmit code path
  Staging: sxg: Fix sleep in atomic context warning while loading driver
  Staging: sxg: Use correct queue_id for transmitting non-TCP packets
  Staging: sxg: Fire watchdog timer at end of open routine to change the link
  Staging: Pohmelfs: Add load balancing between network states with the same priority.
  Staging: Pohmelfs: Added IO permissions and priorities.
  Staging: Pohmelfs: Added ->show_stats() callback.
  Staging: Pohmelfs: Drop ftrans debugging code.
  Staging: Pohmelfs: Use wait_on_page_timeout when waiting for remote directory sync instead of hardcoded 25 seconds.
  Staging: Pohmelfs: Reduce debugging noise about non-existing objects.
  Staging: Pohmelfs: Sync fs before killing it, since dentry cache is shrunk before writeback is invoked via generic_shutdown_super()
  Staging: Pohmelfs: Extend remount option.
  Staging: Pohmelfs: Set NETFS_INODE_REMOTE_SYNCED and clear NETFS_INODE_OWNED bits in the root inode.
  Staging: Pohmelfs: Added 'need_lock' variable into debug print.
  Staging: Pohmelfs: Disable read lock in pohmelfs_getattr().
  Staging: Pohmelfs: Move parent lock to the place where we really have to send a lookup request to the server.
  Staging: pohmelfs: Populate dentry cache when receiving the new readdir entry.
  ...
Linus Torvalds 2009-04-17 13:53:34 -07:00
commit d022bafbb6
29 changed files with 664 additions and 372 deletions


@ -56,9 +56,10 @@ workloads and can fully utilize the bandwidth to the servers when doing bulk
data transfers.
POHMELFS clients operate with a working set of servers and are capable of balancing read-only
operations (like lookups or directory listings) between them.
operations (like lookups or directory listings) between them according to IO priorities.
Administrators can add or remove servers from the set at run-time via special commands (described
in Documentation/pohmelfs/info.txt file). Writes are replicated to all servers.
in Documentation/pohmelfs/info.txt file). Writes are replicated to all servers, which are connected
with write permission turned on. IO priority and permissions can be changed in run-time.
POHMELFS is capable of full data channel encryption and/or strong crypto hashing.
One can select any kernel supported cipher, encryption mode, hash type and operation mode


@ -1,6 +1,8 @@
POHMELFS usage information.
Mount options:
Mount options.
All but index, number of crypto threads and maximum IO size can be changed via remount.
idx=%u
Each mountpoint is associated with a special index via this option.
Administrator can add or remove servers from the given index, so all mounts,
@ -52,16 +54,27 @@ mcache_timeout=%u
Usage examples.
Add (or remove if it already exists) server server1.net:1025 into the working set with index $idx
Add server server1.net:1025 into the working set with index $idx
with appropriate hash algorithm and key file and cipher algorithm, mode and key file:
$cfg -a server1.net -p 1025 -i $idx -K $hash_key -k $cipher_key
$cfg A add -a server1.net -p 1025 -i $idx -K $hash_key -k $cipher_key
Mount filesystem with given index $idx to /mnt mountpoint.
Client will connect to all servers specified in the working set via previous command:
mount -t pohmel -o idx=$idx q /mnt
One can add or remove servers from working set after mounting too.
Change permissions to read-only (-I 1 option, '-I 2' - write-only, 3 - rw):
$cfg A modify -a server1.net -p 1025 -i $idx -I 1
Change IO priority to 123 (node with the highest priority gets read requests).
$cfg A modify -a server1.net -p 1025 -i $idx -P 123
One can check the current status of all connections in the mountstats file:
# cat /proc/$PID/mountstats
...
device none mounted on /mnt with fstype pohmel
idx addr(:port) socket_type protocol active priority permissions
0 server1.net:1026 1 6 1 250 1
0 server2.net:1025 1 6 1 123 3
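The remountable options mentioned at the top of this file can also be changed on a mounted filesystem.
A minimal example, assuming the standard '-o remount' mount syntax; the option names come from the
POHMELFS mount option list and the timeout values here are arbitrary:
mount -o remount,trans_scan_timeout=10000,wait_on_page_timeout=20000 /mnt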
Server installation.


@ -41,6 +41,8 @@ static int binder_last_id;
static struct proc_dir_entry *binder_proc_dir_entry_root;
static struct proc_dir_entry *binder_proc_dir_entry_proc;
static struct hlist_head binder_dead_nodes;
static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);
static int binder_read_proc_proc(
char *page, char **start, off_t off, int count, int *eof, void *data);
@ -54,11 +56,7 @@ static int binder_read_proc_proc(
#define SZ_4M 0x400000
#endif
#ifndef __i386__
#define FORBIDDEN_MMAP_FLAGS (VM_WRITE | VM_EXEC)
#else
#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)
#endif
#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)
@ -236,6 +234,12 @@ struct binder_buffer {
uint8_t data[0];
};
enum {
BINDER_DEFERRED_PUT_FILES = 0x01,
BINDER_DEFERRED_FLUSH = 0x02,
BINDER_DEFERRED_RELEASE = 0x04,
};
struct binder_proc {
struct hlist_node proc_node;
struct rb_root threads;
@ -245,8 +249,11 @@ struct binder_proc {
int pid;
struct vm_area_struct *vma;
struct task_struct *tsk;
struct files_struct *files;
struct hlist_node deferred_work_node;
int deferred_work;
void *buffer;
size_t user_buffer_offset;
ptrdiff_t user_buffer_offset;
struct list_head buffers;
struct rb_root free_buffers;
@ -310,12 +317,14 @@ struct binder_transaction {
uid_t sender_euid;
};
static void binder_defer_work(struct binder_proc *proc, int defer);
/*
* copied from get_unused_fd_flags
*/
int task_get_unused_fd_flags(struct task_struct *tsk, int flags)
int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
struct files_struct *files = get_files_struct(tsk);
struct files_struct *files = proc->files;
int fd, error;
struct fdtable *fdt;
unsigned long rlim_cur;
@ -337,9 +346,9 @@ int task_get_unused_fd_flags(struct task_struct *tsk, int flags)
* will limit the total number of files that can be opened.
*/
rlim_cur = 0;
if (lock_task_sighand(tsk, &irqs)) {
rlim_cur = tsk->signal->rlim[RLIMIT_NOFILE].rlim_cur;
unlock_task_sighand(tsk, &irqs);
if (lock_task_sighand(proc->tsk, &irqs)) {
rlim_cur = proc->tsk->signal->rlim[RLIMIT_NOFILE].rlim_cur;
unlock_task_sighand(proc->tsk, &irqs);
}
if (fd >= rlim_cur)
goto out;
@ -375,7 +384,6 @@ int task_get_unused_fd_flags(struct task_struct *tsk, int flags)
out:
spin_unlock(&files->file_lock);
put_files_struct(files);
return error;
}
@ -383,9 +391,9 @@ int task_get_unused_fd_flags(struct task_struct *tsk, int flags)
* copied from fd_install
*/
static void task_fd_install(
struct task_struct *tsk, unsigned int fd, struct file *file)
struct binder_proc *proc, unsigned int fd, struct file *file)
{
struct files_struct *files = get_files_struct(tsk);
struct files_struct *files = proc->files;
struct fdtable *fdt;
if (files == NULL)
@ -396,7 +404,6 @@ static void task_fd_install(
BUG_ON(fdt->fd[fd] != NULL);
rcu_assign_pointer(fdt->fd[fd], file);
spin_unlock(&files->file_lock);
put_files_struct(files);
}
/*
@ -413,10 +420,10 @@ static void __put_unused_fd(struct files_struct *files, unsigned int fd)
/*
* copied from sys_close
*/
static long task_close_fd(struct task_struct *tsk, unsigned int fd)
static long task_close_fd(struct binder_proc *proc, unsigned int fd)
{
struct file *filp;
struct files_struct *files = get_files_struct(tsk);
struct files_struct *files = proc->files;
struct fdtable *fdt;
int retval;
@ -443,12 +450,10 @@ static long task_close_fd(struct task_struct *tsk, unsigned int fd)
retval == -ERESTART_RESTARTBLOCK))
retval = -EINTR;
put_files_struct(files);
return retval;
out_unlock:
spin_unlock(&files->file_lock);
put_files_struct(files);
return -EBADF;
}
@ -618,7 +623,8 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
proc->pid, page_addr);
goto err_map_kernel_failed;
}
user_page_addr = (size_t)page_addr + proc->user_buffer_offset;
user_page_addr =
(uintptr_t)page_addr + proc->user_buffer_offset;
ret = vm_insert_page(vma, user_page_addr, page[0]);
if (ret) {
printk(KERN_ERR "binder: %d: binder_alloc_buf failed "
@ -639,7 +645,7 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
page_addr -= PAGE_SIZE) {
page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
if (vma)
zap_page_range(vma, (size_t)page_addr +
zap_page_range(vma, (uintptr_t)page_addr +
proc->user_buffer_offset, PAGE_SIZE, NULL);
err_vm_insert_page_failed:
unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
@ -720,18 +726,19 @@ static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
"er %p size %zd\n", proc->pid, size, buffer, buffer_size);
has_page_addr =
(void *)(((size_t)buffer->data + buffer_size) & PAGE_MASK);
(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
if (n == NULL) {
if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
buffer_size = size; /* no room for other buffers */
else
buffer_size = size + sizeof(struct binder_buffer);
}
end_page_addr = (void *)PAGE_ALIGN((size_t)buffer->data + buffer_size);
end_page_addr =
(void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
if (end_page_addr > has_page_addr)
end_page_addr = has_page_addr;
if (binder_update_page_range(proc, 1,
(void *)PAGE_ALIGN((size_t)buffer->data), end_page_addr, NULL))
(void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL))
return NULL;
rb_erase(best_fit, &proc->free_buffers);
@ -762,12 +769,12 @@ static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
static void *buffer_start_page(struct binder_buffer *buffer)
{
return (void *)((size_t)buffer & PAGE_MASK);
return (void *)((uintptr_t)buffer & PAGE_MASK);
}
static void *buffer_end_page(struct binder_buffer *buffer)
{
return (void *)(((size_t)(buffer + 1) - 1) & PAGE_MASK);
return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
}
static void binder_delete_free_buffer(
@ -845,8 +852,8 @@ static void binder_free_buf(
}
binder_update_page_range(proc, 0,
(void *)PAGE_ALIGN((size_t)buffer->data),
(void *)(((size_t)buffer->data + buffer_size) & PAGE_MASK),
(void *)PAGE_ALIGN((uintptr_t)buffer->data),
(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
NULL);
rb_erase(&buffer->rb_node, &proc->allocated_buffers);
buffer->free = 1;
@ -1345,6 +1352,17 @@ binder_transaction(struct binder_proc *proc, struct binder_thread *thread,
if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
struct binder_transaction *tmp;
tmp = thread->transaction_stack;
if (tmp->to_thread != thread) {
binder_user_error("binder: %d:%d got new "
"transaction with bad transaction stack"
", transaction %d has target %d:%d\n",
proc->pid, thread->pid, tmp->debug_id,
tmp->to_proc ? tmp->to_proc->pid : 0,
tmp->to_thread ?
tmp->to_thread->pid : 0);
return_error = BR_FAILED_REPLY;
goto err_bad_call_stack;
}
while (tmp) {
if (tmp->from && tmp->from->proc == target_proc)
target_thread = tmp->from;
@ -1434,10 +1452,19 @@ binder_transaction(struct binder_proc *proc, struct binder_thread *thread,
return_error = BR_FAILED_REPLY;
goto err_copy_data_failed;
}
if (!IS_ALIGNED(tr->offsets_size, sizeof(size_t))) {
binder_user_error("binder: %d:%d got transaction with "
"invalid offsets size, %zd\n",
proc->pid, thread->pid, tr->offsets_size);
return_error = BR_FAILED_REPLY;
goto err_bad_offset;
}
off_end = (void *)offp + tr->offsets_size;
for (; offp < off_end; offp++) {
struct flat_binder_object *fp;
if (*offp > t->buffer->data_size - sizeof(*fp)) {
if (*offp > t->buffer->data_size - sizeof(*fp) ||
t->buffer->data_size < sizeof(*fp) ||
!IS_ALIGNED(*offp, sizeof(void *))) {
binder_user_error("binder: %d:%d got transaction with "
"invalid offset, %zd\n",
proc->pid, thread->pid, *offp);
@ -1544,13 +1571,13 @@ binder_transaction(struct binder_proc *proc, struct binder_thread *thread,
return_error = BR_FAILED_REPLY;
goto err_fget_failed;
}
target_fd = task_get_unused_fd_flags(target_proc->tsk, O_CLOEXEC);
target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
if (target_fd < 0) {
fput(file);
return_error = BR_FAILED_REPLY;
goto err_get_unused_fd_failed;
}
task_fd_install(target_proc->tsk, target_fd, file);
task_fd_install(target_proc, target_fd, file);
if (binder_debug_mask & BINDER_DEBUG_TRANSACTION)
printk(KERN_INFO " fd %ld -> %d\n", fp->handle, target_fd);
/* TODO: fput? */
@ -1655,7 +1682,9 @@ binder_transaction_buffer_release(struct binder_proc *proc, struct binder_buffer
off_end = (void *)offp + buffer->offsets_size;
for (; offp < off_end; offp++) {
struct flat_binder_object *fp;
if (*offp > buffer->data_size - sizeof(*fp)) {
if (*offp > buffer->data_size - sizeof(*fp) ||
buffer->data_size < sizeof(*fp) ||
!IS_ALIGNED(*offp, sizeof(void *))) {
printk(KERN_ERR "binder: transaction release %d bad"
"offset %zd, size %zd\n", debug_id, *offp, buffer->data_size);
continue;
@ -1691,7 +1720,7 @@ binder_transaction_buffer_release(struct binder_proc *proc, struct binder_buffer
if (binder_debug_mask & BINDER_DEBUG_TRANSACTION)
printk(KERN_INFO " fd %ld\n", fp->handle);
if (failed_at)
task_close_fd(proc->tsk, fp->handle);
task_close_fd(proc, fp->handle);
break;
default:
@ -2340,7 +2369,7 @@ binder_thread_read(struct binder_proc *proc, struct binder_thread *thread,
tr.data_size = t->buffer->data_size;
tr.offsets_size = t->buffer->offsets_size;
tr.data.ptr.buffer = (void *)((void *)t->buffer->data + proc->user_buffer_offset);
tr.data.ptr.buffer = (void *)t->buffer->data + proc->user_buffer_offset;
tr.data.ptr.offsets = tr.data.ptr.buffer + ALIGN(t->buffer->data_size, sizeof(void *));
if (put_user(cmd, (uint32_t __user *)ptr))
@ -2656,6 +2685,7 @@ static void binder_vma_open(struct vm_area_struct *vma)
(unsigned long)pgprot_val(vma->vm_page_prot));
dump_stack();
}
static void binder_vma_close(struct vm_area_struct *vma)
{
struct binder_proc *proc = vma->vm_private_data;
@ -2666,6 +2696,7 @@ static void binder_vma_close(struct vm_area_struct *vma)
(vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
(unsigned long)pgprot_val(vma->vm_page_prot));
proc->vma = NULL;
binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
}
static struct vm_operations_struct binder_vm_ops = {
@ -2698,6 +2729,12 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
}
vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
if (proc->buffer) {
ret = -EBUSY;
failure_string = "already mapped";
goto err_already_mapped;
}
area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
if (area == NULL) {
ret = -ENOMEM;
@ -2705,7 +2742,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
goto err_get_vm_area_failed;
}
proc->buffer = area->addr;
proc->user_buffer_offset = vma->vm_start - (size_t)proc->buffer;
proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
#ifdef CONFIG_CPU_CACHE_VIPT
if (cache_is_vipt_aliasing()) {
@ -2738,6 +2775,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
binder_insert_free_buffer(proc, buffer);
proc->free_async_space = proc->buffer_size / 2;
barrier();
proc->files = get_files_struct(current);
proc->vma = vma;
/*printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
@ -2745,10 +2783,12 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
err_alloc_small_buf_failed:
kfree(proc->pages);
proc->pages = NULL;
err_alloc_pages_failed:
vfree(proc->buffer);
proc->buffer = NULL;
err_get_vm_area_failed:
mutex_unlock(&binder_lock);
err_already_mapped:
err_bad_arg:
printk(KERN_ERR "binder_mmap: %d %lx-%lx %s failed %d\n", proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
return ret;
@ -2780,6 +2820,7 @@ static int binder_open(struct inode *nodp, struct file *filp)
if (binder_proc_dir_entry_proc) {
char strbuf[11];
snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
remove_proc_entry(strbuf, binder_proc_dir_entry_proc);
create_proc_read_entry(strbuf, S_IRUGO, binder_proc_dir_entry_proc, binder_read_proc_proc, proc);
}
@ -2788,11 +2829,17 @@ static int binder_open(struct inode *nodp, struct file *filp)
static int binder_flush(struct file *filp, fl_owner_t id)
{
struct rb_node *n;
struct binder_proc *proc = filp->private_data;
int wake_count = 0;
mutex_lock(&binder_lock);
binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
return 0;
}
static void binder_deferred_flush(struct binder_proc *proc)
{
struct rb_node *n;
int wake_count = 0;
for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
@ -2802,28 +2849,35 @@ static int binder_flush(struct file *filp, fl_owner_t id)
}
}
wake_up_interruptible_all(&proc->wait);
mutex_unlock(&binder_lock);
if (binder_debug_mask & BINDER_DEBUG_OPEN_CLOSE)
printk(KERN_INFO "binder_flush: %d woke %d threads\n", proc->pid, wake_count);
return 0;
}
static int binder_release(struct inode *nodp, struct file *filp)
{
struct hlist_node *pos;
struct binder_transaction *t;
struct rb_node *n;
struct binder_proc *proc = filp->private_data;
int threads, nodes, incoming_refs, outgoing_refs, buffers, active_transactions, page_count;
if (binder_proc_dir_entry_proc) {
char strbuf[11];
snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
remove_proc_entry(strbuf, binder_proc_dir_entry_proc);
}
mutex_lock(&binder_lock);
binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
return 0;
}
static void binder_deferred_release(struct binder_proc *proc)
{
struct hlist_node *pos;
struct binder_transaction *t;
struct rb_node *n;
int threads, nodes, incoming_refs, outgoing_refs, buffers, active_transactions, page_count;
BUG_ON(proc->vma);
BUG_ON(proc->files);
hlist_del(&proc->proc_node);
if (binder_context_mgr_node && binder_context_mgr_node->proc == proc) {
if (binder_debug_mask & BINDER_DEBUG_DEAD_BINDER)
@ -2897,7 +2951,6 @@ static int binder_release(struct inode *nodp, struct file *filp)
}
binder_stats.obj_deleted[BINDER_STAT_PROC]++;
mutex_unlock(&binder_lock);
page_count = 0;
if (proc->pages) {
@ -2921,7 +2974,57 @@ static int binder_release(struct inode *nodp, struct file *filp)
proc->pid, threads, nodes, incoming_refs, outgoing_refs, active_transactions, buffers, page_count);
kfree(proc);
return 0;
}
static void binder_deferred_func(struct work_struct *work)
{
struct binder_proc *proc;
struct files_struct *files;
int defer;
do {
mutex_lock(&binder_lock);
mutex_lock(&binder_deferred_lock);
if (!hlist_empty(&binder_deferred_list)) {
proc = hlist_entry(binder_deferred_list.first,
struct binder_proc, deferred_work_node);
hlist_del_init(&proc->deferred_work_node);
defer = proc->deferred_work;
proc->deferred_work = 0;
} else {
proc = NULL;
defer = 0;
}
mutex_unlock(&binder_deferred_lock);
files = NULL;
if (defer & BINDER_DEFERRED_PUT_FILES)
if ((files = proc->files))
proc->files = NULL;
if (defer & BINDER_DEFERRED_FLUSH)
binder_deferred_flush(proc);
if (defer & BINDER_DEFERRED_RELEASE)
binder_deferred_release(proc); /* frees proc */
mutex_unlock(&binder_lock);
if (files)
put_files_struct(files);
} while (proc);
}
static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
static void binder_defer_work(struct binder_proc *proc, int defer)
{
mutex_lock(&binder_deferred_lock);
proc->deferred_work |= defer;
if (hlist_unhashed(&proc->deferred_work_node)) {
hlist_add_head(&proc->deferred_work_node,
&binder_deferred_list);
schedule_work(&binder_deferred_work);
}
mutex_unlock(&binder_deferred_lock);
}
static char *print_binder_transaction(char *buf, char *end, const char *prefix, struct binder_transaction *t)


@ -5259,6 +5259,18 @@ static int at76_alloc_urbs(struct at76_priv *priv,
return 0;
}
static const struct net_device_ops at76_netdev_ops = {
.ndo_open = at76_open,
.ndo_stop = at76_stop,
.ndo_get_stats = at76_get_stats,
.ndo_start_xmit = at76_tx,
.ndo_tx_timeout = at76_tx_timeout,
.ndo_set_multicast_list = at76_set_multicast,
.ndo_set_mac_address = at76_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = eth_change_mtu,
};
/* Register network device and initialize the hardware */
static int at76_init_new_device(struct at76_priv *priv,
struct usb_interface *interface)
@ -5303,21 +5315,15 @@ static int at76_init_new_device(struct at76_priv *priv,
priv->scan_mode = SCAN_TYPE_ACTIVE;
netdev->flags &= ~IFF_MULTICAST; /* not yet or never */
netdev->open = at76_open;
netdev->stop = at76_stop;
netdev->get_stats = at76_get_stats;
netdev->netdev_ops = &at76_netdev_ops;
netdev->ethtool_ops = &at76_ethtool_ops;
/* Add pointers to enable iwspy support. */
priv->wireless_data.spy_data = &priv->spy_data;
netdev->wireless_data = &priv->wireless_data;
netdev->hard_start_xmit = at76_tx;
netdev->tx_timeout = at76_tx_timeout;
netdev->watchdog_timeo = 2 * HZ;
netdev->wireless_handlers = &at76_handler_def;
netdev->set_multicast_list = at76_set_multicast;
netdev->set_mac_address = at76_set_mac_address;
dev_alloc_name(netdev, "wlan%d");
ret = register_netdev(priv->netdev);
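
The at76 hunk above, and the epl, et131x, otus, rt28xx, slicoss, sxg and wlan-ng hunks below, all make
the same mechanical conversion: the individual callback assignments on struct net_device are collected
into one const struct net_device_ops table and attached through netdev->netdev_ops. A minimal sketch of
the pattern follows; the foo_* callbacks are hypothetical placeholders, not functions from any of these
drivers.

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

/* Hypothetical driver callbacks, standing in for at76_open(), sxg_entry_open(), etc. */
static int foo_open(struct net_device *dev)
{
	netif_start_queue(dev);
	return 0;
}

static int foo_stop(struct net_device *dev)
{
	netif_stop_queue(dev);
	return 0;
}

static int foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_open		= foo_open,
	.ndo_stop		= foo_stop,
	.ndo_start_xmit		= foo_start_xmit,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static void foo_setup_netdev(struct net_device *netdev)
{
	/* One assignment replaces the old netdev->open = ..., netdev->stop = ...,
	 * netdev->hard_start_xmit = ... field writes removed in these hunks. */
	netdev->netdev_ops = &foo_netdev_ops;
}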


@ -284,6 +284,17 @@ static tEplKernel VEthRecvFrame(tEplFrameInfo * pFrameInfo_p)
return Ret;
}
static const struct net_device_ops epl_netdev_ops = {
.ndo_open = VEthOpen,
.ndo_stop = VEthClose,
.ndo_get_stats = VEthGetStats,
.ndo_start_xmit = VEthXmit,
.ndo_tx_timeout = VEthTimeout,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
tEplKernel VEthAddInstance(tEplDllkInitParam *pInitParam_p)
{
tEplKernel Ret = kEplSuccessful;
@ -299,11 +310,7 @@ tEplKernel VEthAddInstance(tEplDllkInitParam *pInitParam_p)
goto Exit;
}
pVEthNetDevice_g->open = VEthOpen;
pVEthNetDevice_g->stop = VEthClose;
pVEthNetDevice_g->get_stats = VEthGetStats;
pVEthNetDevice_g->hard_start_xmit = VEthXmit;
pVEthNetDevice_g->tx_timeout = VEthTimeout;
pVEthNetDevice_g->netdev_ops = &epl_netdev_ops;
pVEthNetDevice_g->watchdog_timeo = EPL_VETH_TX_TIMEOUT;
pVEthNetDevice_g->destructor = free_netdev;


@ -112,6 +112,19 @@ void et131x_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp);
void et131x_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
void et131x_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);
static const struct net_device_ops et131x_netdev_ops = {
.ndo_open = et131x_open,
.ndo_stop = et131x_close,
.ndo_start_xmit = et131x_tx,
.ndo_set_multicast_list = et131x_multicast,
.ndo_tx_timeout = et131x_tx_timeout,
.ndo_change_mtu = et131x_change_mtu,
.ndo_set_mac_address = et131x_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_get_stats = et131x_stats,
.ndo_do_ioctl = et131x_ioctl,
};
/**
* et131x_device_alloc
*
@ -142,16 +155,8 @@ struct net_device *et131x_device_alloc(void)
*/
//netdev->init = &et131x_init;
//netdev->set_config = &et131x_config;
netdev->get_stats = &et131x_stats;
netdev->open = &et131x_open;
netdev->stop = &et131x_close;
netdev->do_ioctl = &et131x_ioctl;
netdev->set_multicast_list = &et131x_multicast;
netdev->hard_start_xmit = &et131x_tx;
netdev->tx_timeout = &et131x_tx_timeout;
netdev->watchdog_timeo = ET131X_TX_TIMEOUT;
netdev->change_mtu = &et131x_change_mtu;
netdev->set_mac_address = &et131x_set_mac_addr;
netdev->netdev_ops = &et131x_netdev_ops;
//netdev->ethtool_ops = &et131x_ethtool_ops;


@ -268,21 +268,6 @@ int go7007_register_encoder(struct go7007 *go)
init_i2c_module(&go->i2c_adapter,
go->board_info->i2c_devs[i].id,
go->board_info->i2c_devs[i].addr);
#ifdef TUNER_SET_TYPE_ADDR
if (go->tuner_type >= 0) {
struct tuner_setup tun_setup = {
.mode_mask = T_ANALOG_TV,
.addr = ADDR_UNSET,
.type = go->tuner_type
};
i2c_clients_command(&go->i2c_adapter,
TUNER_SET_TYPE_ADDR, &tun_setup);
}
#else
if (go->tuner_type >= 0)
i2c_clients_command(&go->i2c_adapter,
TUNER_SET_TYPE, &go->tuner_type);
#endif
if (go->board_id == GO7007_BOARDID_ADLINK_MPG24)
i2c_clients_command(&go->i2c_adapter,
DECODER_SET_CHANNEL, &go->channel_number);


@ -386,6 +386,7 @@ static int tuner_command(struct i2c_client *client, unsigned int cmd, void *arg)
struct wis_sony_tuner *t = i2c_get_clientdata(client);
switch (cmd) {
#if 0
#ifdef TUNER_SET_TYPE_ADDR
case TUNER_SET_TYPE_ADDR:
{
@ -463,6 +464,7 @@ static int tuner_command(struct i2c_client *client, unsigned int cmd, void *arg)
t->type, sony_tuners[t->type - 200].name);
break;
}
#endif
case VIDIOC_G_FREQUENCY:
{
struct v4l2_frequency *f = arg;


@ -27,11 +27,12 @@ int line6_init_audio(struct usb_line6 *line6)
{
static int dev;
struct snd_card *card;
int err;
card = snd_card_new(line6_index[dev], line6_id[dev], THIS_MODULE, 0);
if (card == NULL)
return -ENOMEM;
err = snd_card_create(line6_index[dev], line6_id[dev], THIS_MODULE, 0,
&card);
if (err < 0)
return err;
line6->card = card;


@ -822,6 +822,21 @@ int zfLnxVapXmitFrame(struct sk_buff *skb, struct net_device *dev)
return 0;
}
static const struct net_device_ops vap_netdev_ops = {
.ndo_open = zfLnxVapOpen,
.ndo_stop = zfLnxVapClose,
.ndo_start_xmit = zfLnxVapXmitFrame,
.ndo_get_stats = usbdrv_get_stats,
.ndo_change_mtu = usbdrv_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
#ifdef ZM_HOSTAPD_SUPPORT
.ndo_do_ioctl = usbdrv_ioctl,
#else
.ndo_do_ioctl = NULL,
#endif
};
int zfLnxRegisterVapDev(struct net_device* parentDev, u16_t vapId)
{
/* Allocate net device structure */
@ -846,16 +861,7 @@ int zfLnxRegisterVapDev(struct net_device* parentDev, u16_t vapId)
vap[vapId].dev->ml_priv = parentDev->ml_priv;
//dev->hard_start_xmit = &zd1212_wds_xmit_frame;
vap[vapId].dev->hard_start_xmit = &zfLnxVapXmitFrame;
vap[vapId].dev->open = &zfLnxVapOpen;
vap[vapId].dev->stop = &zfLnxVapClose;
vap[vapId].dev->get_stats = &usbdrv_get_stats;
vap[vapId].dev->change_mtu = &usbdrv_change_mtu;
#ifdef ZM_HOSTAPD_SUPPORT
vap[vapId].dev->do_ioctl = usbdrv_ioctl;
#else
vap[vapId].dev->do_ioctl = NULL;
#endif
vap[vapId].dev->netdev_ops = &vap_netdev_ops;
vap[vapId].dev->destructor = free_netdev;
vap[vapId].dev->tx_queue_len = 0;
@ -1068,6 +1074,18 @@ void zfLnxUnlinkAllUrbs(struct usbdrv_private *macp)
usb_unlink_urb(macp->RegInUrb);
}
static const struct net_device_ops otus_netdev_ops = {
.ndo_open = usbdrv_open,
.ndo_stop = usbdrv_close,
.ndo_start_xmit = usbdrv_xmit_frame,
.ndo_change_mtu = usbdrv_change_mtu,
.ndo_get_stats = usbdrv_get_stats,
.ndo_set_multicast_list = usbdrv_set_multi,
.ndo_set_mac_address = usbdrv_set_mac,
.ndo_do_ioctl = usbdrv_ioctl,
.ndo_validate_addr = eth_validate_addr,
};
u8_t zfLnxInitSetup(struct net_device *dev, struct usbdrv_private *macp)
{
//unsigned char addr[6];
@ -1092,14 +1110,7 @@ u8_t zfLnxInitSetup(struct net_device *dev, struct usbdrv_private *macp)
dev->wireless_handlers = (struct iw_handler_def *)&p80211wext_handler_def;
#endif
dev->open = usbdrv_open;
dev->hard_start_xmit = usbdrv_xmit_frame;
dev->stop = usbdrv_close;
dev->change_mtu = &usbdrv_change_mtu;
dev->get_stats = usbdrv_get_stats;
dev->set_multicast_list = usbdrv_set_multi;
dev->set_mac_address = usbdrv_set_mac;
dev->do_ioctl = usbdrv_ioctl;
dev->netdev_ops = &otus_netdev_ops;
dev->flags |= IFF_MULTICAST;


@ -48,7 +48,8 @@ static const char driver_name[] = "Otus";
static struct usb_device_id zd1221_ids [] = {
{ USB_DEVICE(VENDOR_ATHR, PRODUCT_AR9170) },
{ USB_DEVICE(VENDOR_DLINK, PRODUCT_DWA160A) },
{ USB_DEVICE(0x0846, 0x9010) },
{ USB_DEVICE(VENDOR_NETGEAR, PRODUCT_WNDA3100) },
{ USB_DEVICE(VENDOR_NETGEAR, PRODUCT_WN111v2) },
{ } /* Terminating entry */
};


@ -40,4 +40,8 @@
#define VENDOR_DLINK 0x07D1 //Dlink
#define PRODUCT_DWA160A 0x3C10
#define VENDOR_NETGEAR 0x0846 /* NetGear */
#define PRODUCT_WNDA3100 0x9010
#define PRODUCT_WN111v2 0x9001
#endif


@ -81,6 +81,45 @@ static struct pohmelfs_config_group *pohmelfs_find_create_config_group(unsigned
return g;
}
static inline void pohmelfs_insert_config_entry(struct pohmelfs_sb *psb, struct pohmelfs_config *dst)
{
struct pohmelfs_config *tmp;
INIT_LIST_HEAD(&dst->config_entry);
list_for_each_entry(tmp, &psb->state_list, config_entry) {
if (dst->state.ctl.prio > tmp->state.ctl.prio)
list_add_tail(&dst->config_entry, &tmp->config_entry);
}
if (list_empty(&dst->config_entry))
list_add_tail(&dst->config_entry, &psb->state_list);
}
static int pohmelfs_move_config_entry(struct pohmelfs_sb *psb,
struct pohmelfs_config *dst, struct pohmelfs_config *new)
{
if ((dst->state.ctl.prio == new->state.ctl.prio) &&
(dst->state.ctl.perm == new->state.ctl.perm))
return 0;
dprintk("%s: dst: prio: %d, perm: %x, new: prio: %d, perm: %d.\n",
__func__, dst->state.ctl.prio, dst->state.ctl.perm,
new->state.ctl.prio, new->state.ctl.perm);
dst->state.ctl.prio = new->state.ctl.prio;
dst->state.ctl.perm = new->state.ctl.perm;
list_del_init(&dst->config_entry);
pohmelfs_insert_config_entry(psb, dst);
return 0;
}
/*
* pohmelfs_copy_config() is used to copy new state configs from the
* config group (controlled by the netlink messages) into the superblock.
* This happens either at startup time where no transactions can access
* the list of the configs (and thus list of the network states), or at
* run-time, where it is protected by the psb->state_lock.
*/
int pohmelfs_copy_config(struct pohmelfs_sb *psb)
{
struct pohmelfs_config_group *g;
@ -103,7 +142,9 @@ int pohmelfs_copy_config(struct pohmelfs_sb *psb)
err = 0;
list_for_each_entry(dst, &psb->state_list, config_entry) {
if (pohmelfs_config_eql(&dst->state.ctl, &c->state.ctl)) {
err = -EEXIST;
err = pohmelfs_move_config_entry(psb, dst, c);
if (!err)
err = -EEXIST;
break;
}
}
@ -119,7 +160,7 @@ int pohmelfs_copy_config(struct pohmelfs_sb *psb)
memcpy(&dst->state.ctl, &c->state.ctl, sizeof(struct pohmelfs_ctl));
list_add_tail(&dst->config_entry, &psb->state_list);
pohmelfs_insert_config_entry(psb, dst);
err = pohmelfs_state_init_one(psb, dst);
if (err) {
@ -248,6 +289,13 @@ static int pohmelfs_cn_disp(struct cn_msg *msg)
return err;
}
static int pohmelfs_modify_config(struct pohmelfs_ctl *old, struct pohmelfs_ctl *new)
{
old->perm = new->perm;
old->prio = new->prio;
return 0;
}
static int pohmelfs_cn_ctl(struct cn_msg *msg, int action)
{
struct pohmelfs_config_group *g;
@ -278,6 +326,9 @@ static int pohmelfs_cn_ctl(struct cn_msg *msg, int action)
g->num_entry--;
kfree(c);
goto out_unlock;
} else if (action == POHMELFS_FLAGS_MODIFY) {
err = pohmelfs_modify_config(sc, ctl);
goto out_unlock;
} else {
err = -EEXIST;
goto out_unlock;
@ -296,6 +347,7 @@ static int pohmelfs_cn_ctl(struct cn_msg *msg, int action)
}
memcpy(&c->state.ctl, ctl, sizeof(struct pohmelfs_ctl));
g->num_entry++;
list_add_tail(&c->config_entry, &g->config_list);
out_unlock:
@ -401,10 +453,9 @@ static void pohmelfs_cn_callback(void *data)
switch (msg->flags) {
case POHMELFS_FLAGS_ADD:
err = pohmelfs_cn_ctl(msg, POHMELFS_FLAGS_ADD);
break;
case POHMELFS_FLAGS_DEL:
err = pohmelfs_cn_ctl(msg, POHMELFS_FLAGS_DEL);
case POHMELFS_FLAGS_MODIFY:
err = pohmelfs_cn_ctl(msg, msg->flags);
break;
case POHMELFS_FLAGS_SHOW:
err = pohmelfs_cn_disp(msg);


@ -328,7 +328,7 @@ static int pohmelfs_sync_remote_dir(struct pohmelfs_inode *pi)
{
struct inode *inode = &pi->vfs_inode;
struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb);
long ret = msecs_to_jiffies(25000);
long ret = psb->wait_on_page_timeout;
int err;
dprintk("%s: dir: %llu, state: %lx: remote_synced: %d.\n",
@ -389,11 +389,11 @@ static int pohmelfs_readdir(struct file *file, void *dirent, filldir_t filldir)
dprintk("%s: parent: %llu, fpos: %llu, hash: %08lx.\n",
__func__, pi->ino, (u64)file->f_pos,
(unsigned long)file->private_data);
#if 0
err = pohmelfs_data_lock(pi, 0, ~0, POHMELFS_READ_LOCK);
if (err)
return err;
#endif
err = pohmelfs_sync_remote_dir(pi);
if (err)
return err;
@ -513,10 +513,6 @@ struct dentry *pohmelfs_lookup(struct inode *dir, struct dentry *dentry, struct
need_lock = pohmelfs_need_lock(parent, lock_type);
err = pohmelfs_data_lock(parent, 0, ~0, lock_type);
if (err)
goto out;
str.hash = jhash(dentry->d_name.name, dentry->d_name.len, 0);
mutex_lock(&parent->offset_lock);
@ -525,8 +521,8 @@ struct dentry *pohmelfs_lookup(struct inode *dir, struct dentry *dentry, struct
ino = n->ino;
mutex_unlock(&parent->offset_lock);
dprintk("%s: 1 ino: %lu, inode: %p, name: '%s', hash: %x, parent_state: %lx.\n",
__func__, ino, inode, str.name, str.hash, parent->state);
dprintk("%s: start ino: %lu, inode: %p, name: '%s', hash: %x, parent_state: %lx, need_lock: %d.\n",
__func__, ino, inode, str.name, str.hash, parent->state, need_lock);
if (ino) {
inode = ilookup(dir->i_sb, ino);
@ -534,7 +530,7 @@ struct dentry *pohmelfs_lookup(struct inode *dir, struct dentry *dentry, struct
goto out;
}
dprintk("%s: dir: %p, dir_ino: %llu, name: '%s', len: %u, dir_state: %lx, ino: %lu.\n",
dprintk("%s: no inode dir: %p, dir_ino: %llu, name: '%s', len: %u, dir_state: %lx, ino: %lu.\n",
__func__, dir, parent->ino,
str.name, str.len, parent->state, ino);
@ -543,6 +539,10 @@ struct dentry *pohmelfs_lookup(struct inode *dir, struct dentry *dentry, struct
goto out;
}
err = pohmelfs_data_lock(parent, 0, ~0, lock_type);
if (err)
goto out;
err = pohmelfs_lookup_single(parent, &str, ino);
if (err)
goto out;
@ -557,10 +557,10 @@ struct dentry *pohmelfs_lookup(struct inode *dir, struct dentry *dentry, struct
if (ino) {
inode = ilookup(dir->i_sb, ino);
printk("%s: second lookup ino: %lu, inode: %p, name: '%s', hash: %x.\n",
dprintk("%s: second lookup ino: %lu, inode: %p, name: '%s', hash: %x.\n",
__func__, ino, inode, str.name, str.hash);
if (!inode) {
printk("%s: No inode for ino: %lu, name: '%s', hash: %x.\n",
dprintk("%s: No inode for ino: %lu, name: '%s', hash: %x.\n",
__func__, ino, str.name, str.hash);
//return NULL;
return ERR_PTR(-EACCES);


@ -1169,16 +1169,17 @@ static ssize_t pohmelfs_getxattr(struct dentry *dentry, const char *name,
static int pohmelfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
{
struct inode *inode = dentry->d_inode;
#if 0
struct pohmelfs_inode *pi = POHMELFS_I(inode);
int err;
err = pohmelfs_data_lock(pi, 0, ~0, POHMELFS_READ_LOCK);
if (err)
return err;
dprintk("%s: ino: %llu, mode: %o, uid: %u, gid: %u, size: %llu.\n",
__func__, pi->ino, inode->i_mode, inode->i_uid,
inode->i_gid, inode->i_size);
#endif
generic_fillattr(inode, stat);
return 0;
@ -1342,14 +1343,6 @@ static void pohmelfs_put_super(struct super_block *sb)
kfree(psb);
sb->s_fs_info = NULL;
pohmelfs_ftrans_exit();
}
static int pohmelfs_remount(struct super_block *sb, int *flags, char *data)
{
*flags |= MS_RDONLY;
return 0;
}
static int pohmelfs_statfs(struct dentry *dentry, struct kstatfs *buf)
@ -1394,42 +1387,33 @@ static int pohmelfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
return 0;
}
static const struct super_operations pohmelfs_sb_ops = {
.alloc_inode = pohmelfs_alloc_inode,
.destroy_inode = pohmelfs_destroy_inode,
.drop_inode = pohmelfs_drop_inode,
.write_inode = pohmelfs_write_inode,
.put_super = pohmelfs_put_super,
.remount_fs = pohmelfs_remount,
.statfs = pohmelfs_statfs,
.show_options = pohmelfs_show_options,
};
enum {
pohmelfs_opt_idx,
pohmelfs_opt_crypto_thread_num,
pohmelfs_opt_trans_max_pages,
pohmelfs_opt_crypto_fail_unsupported,
/* Remountable options */
pohmelfs_opt_trans_scan_timeout,
pohmelfs_opt_drop_scan_timeout,
pohmelfs_opt_wait_on_page_timeout,
pohmelfs_opt_trans_retries,
pohmelfs_opt_crypto_thread_num,
pohmelfs_opt_trans_max_pages,
pohmelfs_opt_crypto_fail_unsupported,
pohmelfs_opt_mcache_timeout,
};
static struct match_token pohmelfs_tokens[] = {
{pohmelfs_opt_idx, "idx=%u"},
{pohmelfs_opt_crypto_thread_num, "crypto_thread_num=%u"},
{pohmelfs_opt_trans_max_pages, "trans_max_pages=%u"},
{pohmelfs_opt_crypto_fail_unsupported, "crypto_fail_unsupported"},
{pohmelfs_opt_trans_scan_timeout, "trans_scan_timeout=%u"},
{pohmelfs_opt_drop_scan_timeout, "drop_scan_timeout=%u"},
{pohmelfs_opt_wait_on_page_timeout, "wait_on_page_timeout=%u"},
{pohmelfs_opt_trans_retries, "trans_retries=%u"},
{pohmelfs_opt_crypto_thread_num, "crypto_thread_num=%u"},
{pohmelfs_opt_trans_max_pages, "trans_max_pages=%u"},
{pohmelfs_opt_crypto_fail_unsupported, "crypto_fail_unsupported"},
{pohmelfs_opt_mcache_timeout, "mcache_timeout=%u"},
};
static int pohmelfs_parse_options(char *options, struct pohmelfs_sb *psb)
static int pohmelfs_parse_options(char *options, struct pohmelfs_sb *psb, int remount)
{
char *p;
substring_t args[MAX_OPT_ARGS];
@ -1449,6 +1433,9 @@ static int pohmelfs_parse_options(char *options, struct pohmelfs_sb *psb)
if (err)
return err;
if (remount && token <= pohmelfs_opt_crypto_fail_unsupported)
continue;
switch (token) {
case pohmelfs_opt_idx:
psb->idx = option;
@ -1485,6 +1472,25 @@ static int pohmelfs_parse_options(char *options, struct pohmelfs_sb *psb)
return 0;
}
static int pohmelfs_remount(struct super_block *sb, int *flags, char *data)
{
int err;
struct pohmelfs_sb *psb = POHMELFS_SB(sb);
unsigned long old_sb_flags = sb->s_flags;
err = pohmelfs_parse_options(data, psb, 1);
if (err)
goto err_out_restore;
if (!(*flags & MS_RDONLY))
sb->s_flags &= ~MS_RDONLY;
return 0;
err_out_restore:
sb->s_flags = old_sb_flags;
return err;
}
static void pohmelfs_flush_inode(struct pohmelfs_inode *pi, unsigned int count)
{
struct inode *inode = &pi->vfs_inode;
@ -1753,6 +1759,57 @@ static int pohmelfs_root_handshake(struct pohmelfs_sb *psb)
return err;
}
static int pohmelfs_show_stats(struct seq_file *m, struct vfsmount *mnt)
{
struct netfs_state *st;
struct pohmelfs_ctl *ctl;
struct pohmelfs_sb *psb = POHMELFS_SB(mnt->mnt_sb);
struct pohmelfs_config *c;
mutex_lock(&psb->state_lock);
seq_printf(m, "\nidx addr(:port) socket_type protocol active priority permissions\n");
list_for_each_entry(c, &psb->state_list, config_entry) {
st = &c->state;
ctl = &st->ctl;
seq_printf(m, "%u ", ctl->idx);
if (ctl->addr.sa_family == AF_INET) {
struct sockaddr_in *sin = (struct sockaddr_in *)&st->ctl.addr;
//seq_printf(m, "%pi4:%u", &sin->sin_addr.s_addr, ntohs(sin->sin_port));
seq_printf(m, "%u.%u.%u.%u:%u", NIPQUAD(sin->sin_addr.s_addr), ntohs(sin->sin_port));
} else if (ctl->addr.sa_family == AF_INET6) {
struct sockaddr_in6 *sin = (struct sockaddr_in6 *)&st->ctl.addr;
seq_printf(m, "%pi6:%u", &sin->sin6_addr, ntohs(sin->sin6_port));
} else {
unsigned int i;
for (i=0; i<ctl->addrlen; ++i)
seq_printf(m, "%02x.", ctl->addr.addr[i]);
}
seq_printf(m, " %u %u %d %u %x\n",
ctl->type, ctl->proto,
st->socket != NULL,
ctl->prio, ctl->perm);
}
mutex_unlock(&psb->state_lock);
return 0;
}
static const struct super_operations pohmelfs_sb_ops = {
.alloc_inode = pohmelfs_alloc_inode,
.destroy_inode = pohmelfs_destroy_inode,
.drop_inode = pohmelfs_drop_inode,
.write_inode = pohmelfs_write_inode,
.put_super = pohmelfs_put_super,
.remount_fs = pohmelfs_remount,
.statfs = pohmelfs_statfs,
.show_options = pohmelfs_show_options,
.show_stats = pohmelfs_show_stats,
};
/*
* Allocate private superblock and create root dir.
*/
@ -1764,8 +1821,6 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
struct pohmelfs_inode *npi;
struct qstr str;
pohmelfs_ftrans_init();
psb = kzalloc(sizeof(struct pohmelfs_sb), GFP_KERNEL);
if (!psb)
goto err_out_exit;
@ -1816,7 +1871,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
mutex_init(&psb->state_lock);
INIT_LIST_HEAD(&psb->state_list);
err = pohmelfs_parse_options((char *) data, psb);
err = pohmelfs_parse_options((char *) data, psb, 0);
if (err)
goto err_out_free_sb;
@ -1845,6 +1900,8 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
err = PTR_ERR(npi);
goto err_out_crypto_exit;
}
set_bit(NETFS_INODE_REMOTE_SYNCED, &npi->state);
clear_bit(NETFS_INODE_OWNED, &npi->state);
root = &npi->vfs_inode;
@ -1887,11 +1944,29 @@ static int pohmelfs_get_sb(struct file_system_type *fs_type,
mnt);
}
/*
* We need this to sync all inodes earlier, since when writeback
* is invoked from the umount/mntput path dcache is already shrunk,
* see generic_shutdown_super(), and no inodes can access the path.
*/
static void pohmelfs_kill_super(struct super_block *sb)
{
struct writeback_control wbc = {
.sync_mode = WB_SYNC_ALL,
.range_start = 0,
.range_end = LLONG_MAX,
.nr_to_write = LONG_MAX,
};
generic_sync_sb_inodes(sb, &wbc);
kill_anon_super(sb);
}
static struct file_system_type pohmel_fs_type = {
.owner = THIS_MODULE,
.name = "pohmel",
.get_sb = pohmelfs_get_sb,
.kill_sb = kill_anon_super,
.kill_sb = pohmelfs_kill_super,
};
/*


@ -41,7 +41,8 @@ static int pohmelfs_send_lock_trans(struct pohmelfs_inode *pi,
path_len = err;
err = -ENOMEM;
t = netfs_trans_alloc(psb, path_len + sizeof(struct netfs_lock) + isize, 0, 0);
t = netfs_trans_alloc(psb, path_len + sizeof(struct netfs_lock) + isize,
NETFS_TRANS_SINGLE_DST, 0);
if (!t)
goto err_out_exit;


@ -26,55 +26,6 @@
#include "netfs.h"
static int pohmelfs_ftrans_size = 10240;
static u32 *pohmelfs_ftrans;
int pohmelfs_ftrans_init(void)
{
pohmelfs_ftrans = vmalloc(pohmelfs_ftrans_size * 4);
if (!pohmelfs_ftrans)
return -ENOMEM;
return 0;
}
void pohmelfs_ftrans_exit(void)
{
vfree(pohmelfs_ftrans);
}
void pohmelfs_ftrans_clean(u64 id)
{
if (pohmelfs_ftrans) {
u32 i = id & 0xffffffff;
int idx = i % pohmelfs_ftrans_size;
pohmelfs_ftrans[idx] = 0;
}
}
void pohmelfs_ftrans_update(u64 id)
{
if (pohmelfs_ftrans) {
u32 i = id & 0xffffffff;
int idx = i % pohmelfs_ftrans_size;
pohmelfs_ftrans[idx] = i;
}
}
int pohmelfs_ftrans_check(u64 id)
{
if (pohmelfs_ftrans) {
u32 i = id & 0xffffffff;
int idx = i % pohmelfs_ftrans_size;
return (pohmelfs_ftrans[idx] == i);
}
return -1;
}
/*
* Async machinery lives here.
* All commands being sent to server do _not_ require sync reply,
@ -450,8 +401,24 @@ static int pohmelfs_readdir_response(struct netfs_state *st)
if (err != -EEXIST)
goto err_out_put;
} else {
struct dentry *dentry, *alias, *pd;
set_bit(NETFS_INODE_REMOTE_SYNCED, &npi->state);
clear_bit(NETFS_INODE_OWNED, &npi->state);
pd = d_find_alias(&parent->vfs_inode);
if (pd) {
str.hash = full_name_hash(str.name, str.len);
dentry = d_alloc(pd, &str);
if (dentry) {
alias = d_materialise_unique(dentry, &npi->vfs_inode);
if (alias)
dput(dentry);
}
dput(dentry);
dput(pd);
}
}
}
out:
@ -638,15 +605,12 @@ static int pohmelfs_transaction_response(struct netfs_state *st)
if (dst) {
netfs_trans_remove_nolock(dst, st);
t = dst->trans;
pohmelfs_ftrans_update(cmd->start);
}
mutex_unlock(&st->trans_lock);
if (!t) {
int check = pohmelfs_ftrans_check(cmd->start);
printk("%s: failed to find transaction: start: %llu: id: %llu, size: %u, ext: %u, double: %d.\n",
__func__, cmd->start, cmd->id, cmd->size, cmd->ext, check);
printk("%s: failed to find transaction: start: %llu: id: %llu, size: %u, ext: %u.\n",
__func__, cmd->start, cmd->id, cmd->size, cmd->ext);
err = -EINVAL;
goto out;
}


@ -87,6 +87,7 @@ enum {
POHMELFS_FLAGS_DEL, /* Network state control message for DEL */
POHMELFS_FLAGS_SHOW, /* Network state control message for SHOW */
POHMELFS_FLAGS_CRYPTO, /* Crypto data control message */
POHMELFS_FLAGS_MODIFY, /* Network state modification message */
};
/*
@ -116,16 +117,20 @@ struct pohmelfs_crypto
unsigned char data[0]; /* Algorithm string, key and IV */
};
#define POHMELFS_IO_PERM_READ (1<<0)
#define POHMELFS_IO_PERM_WRITE (1<<1)
/*
* Configuration command used to create table of different remote servers.
*/
struct pohmelfs_ctl
{
unsigned int idx; /* Config index */
unsigned int type; /* Socket type */
unsigned int proto; /* Socket protocol */
unsigned int addrlen; /* Size of the address */
unsigned short unused; /* Align structure by 4 bytes */
__u32 idx; /* Config index */
__u32 type; /* Socket type */
__u32 proto; /* Socket protocol */
__u16 addrlen; /* Size of the address */
__u16 perm; /* IO permission */
__u16 prio; /* IO priority */
struct saddr addr; /* Remote server address */
};
@ -921,12 +926,6 @@ static inline void pohmelfs_mcache_put(struct pohmelfs_sb *psb,
pohmelfs_mcache_free(psb, m);
}
int pohmelfs_ftrans_init(void);
void pohmelfs_ftrans_exit(void);
void pohmelfs_ftrans_update(u64 id);
int pohmelfs_ftrans_check(u64 id);
void pohmelfs_ftrans_clean(u64 id);
#endif /* __KERNEL__*/
#endif /* __NETFS_H */


@ -456,34 +456,25 @@ int netfs_trans_finish_send(struct netfs_trans *t, struct pohmelfs_sb *psb)
__func__, t, t->gen, t->iovec.iov_len, t->page_num, psb->active_state);
#endif
mutex_lock(&psb->state_lock);
if ((t->flags & NETFS_TRANS_SINGLE_DST) && psb->active_state) {
st = &psb->active_state->state;
err = -EPIPE;
if (netfs_state_poll(st) & POLLOUT) {
err = netfs_trans_push_dst(t, st);
if (!err) {
err = netfs_trans_send(t, st);
if (err) {
netfs_trans_drop_last(t, st);
} else {
pohmelfs_switch_active(psb);
goto out;
}
}
}
pohmelfs_switch_active(psb);
}
list_for_each_entry(c, &psb->state_list, config_entry) {
st = &c->state;
if (t->flags & NETFS_TRANS_SINGLE_DST) {
if (!(st->ctl.perm & POHMELFS_IO_PERM_READ))
continue;
} else {
if (!(st->ctl.perm & POHMELFS_IO_PERM_WRITE))
continue;
}
if (psb->active_state && (psb->active_state->state.ctl.prio >= st->ctl.prio))
st = &psb->active_state->state;
err = netfs_trans_push(t, st);
if (!err && (t->flags & NETFS_TRANS_SINGLE_DST))
break;
}
out:
mutex_unlock(&psb->state_lock);
#if 0
dprintk("%s: fully sent t: %p, gen: %u, size: %u, page_num: %u, err: %d.\n",
@ -501,8 +492,6 @@ int netfs_trans_finish(struct netfs_trans *t, struct pohmelfs_sb *psb)
t->gen = atomic_inc_return(&psb->trans_gen);
pohmelfs_ftrans_clean(t->gen);
cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
cmd->cmd = NETFS_TRANS;


@ -722,6 +722,20 @@ int rt28xx_open(IN PNET_DEV dev)
return (-1);
} /* End of rt28xx_open */
static const struct net_device_ops rt2860_netdev_ops = {
.ndo_open = MainVirtualIF_open,
.ndo_stop = MainVirtualIF_close,
.ndo_do_ioctl = rt28xx_ioctl,
.ndo_get_stats = RT28xx_get_ether_stats,
.ndo_validate_addr = NULL,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu,
#ifdef IKANOS_VX_1X0
.ndo_start_xmit = IKANOS_DataFramesTx,
#else
.ndo_start_xmit = rt28xx_send_packets,
#endif
};
/* Must not be called for mdev and apdev */
static NDIS_STATUS rt_ieee80211_if_setup(struct net_device *dev, PRTMP_ADAPTER pAd)
@ -733,11 +747,6 @@ static NDIS_STATUS rt_ieee80211_if_setup(struct net_device *dev, PRTMP_ADAPTER p
//ether_setup(dev);
dev->hard_start_xmit = rt28xx_send_packets;
#ifdef IKANOS_VX_1X0
dev->hard_start_xmit = IKANOS_DataFramesTx;
#endif // IKANOS_VX_1X0 //
#ifdef CONFIG_STA_SUPPORT
#if WIRELESS_EXT >= 12
@ -760,12 +769,8 @@ static NDIS_STATUS rt_ieee80211_if_setup(struct net_device *dev, PRTMP_ADAPTER p
#if WIRELESS_EXT < 21
dev->get_wireless_stats = rt28xx_get_wireless_stats;
#endif
dev->get_stats = RT28xx_get_ether_stats;
dev->open = MainVirtualIF_open; //rt28xx_open;
dev->stop = MainVirtualIF_close; //rt28xx_close;
dev->priv_flags = INT_MAIN;
dev->do_ioctl = rt28xx_ioctl;
dev->validate_addr = NULL;
dev->netdev_ops = &rt2860_netdev_ops;
// find available device name
for (i = 0; i < 8; i++)
{


@ -96,6 +96,7 @@
{USB_DEVICE(0x0DF6,0x002B)}, /* Sitecom */ \
{USB_DEVICE(0x0DF6,0x002C)}, /* Sitecom */ \
{USB_DEVICE(0x0DF6,0x002D)}, /* Sitecom */ \
{USB_DEVICE(0x0DF6,0x0039)}, /* Sitecom */ \
{USB_DEVICE(0x14B2,0x3C06)}, /* Conceptronic */ \
{USB_DEVICE(0x14B2,0x3C28)}, /* Conceptronic */ \
{USB_DEVICE(0x2019,0xED06)}, /* Planex Communications, Inc. */ \


@ -855,6 +855,20 @@ int rt28xx_open(IN PNET_DEV dev)
return (-1);
} /* End of rt28xx_open */
static const struct net_device_ops rt2870_netdev_ops = {
.ndo_open = MainVirtualIF_open,
.ndo_stop = MainVirtualIF_close,
.ndo_do_ioctl = rt28xx_ioctl,
.ndo_get_stats = RT28xx_get_ether_stats,
.ndo_validate_addr = NULL,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu,
#ifdef IKANOS_VX_1X0
.ndo_start_xmit = IKANOS_DataFramesTx,
#else
.ndo_start_xmit = rt28xx_send_packets,
#endif
};
/* Must not be called for mdev and apdev */
static NDIS_STATUS rt_ieee80211_if_setup(struct net_device *dev, PRTMP_ADAPTER pAd)
@ -866,12 +880,6 @@ static NDIS_STATUS rt_ieee80211_if_setup(struct net_device *dev, PRTMP_ADAPTER p
//ether_setup(dev);
dev->hard_start_xmit = rt28xx_send_packets;
#ifdef IKANOS_VX_1X0
dev->hard_start_xmit = IKANOS_DataFramesTx;
#endif // IKANOS_VX_1X0 //
// dev->set_multicast_list = ieee80211_set_multicast_list;
// dev->change_mtu = ieee80211_change_mtu;
#ifdef CONFIG_STA_SUPPORT
@ -895,16 +903,10 @@ static NDIS_STATUS rt_ieee80211_if_setup(struct net_device *dev, PRTMP_ADAPTER p
#if WIRELESS_EXT < 21
dev->get_wireless_stats = rt28xx_get_wireless_stats;
#endif
dev->get_stats = RT28xx_get_ether_stats;
dev->open = MainVirtualIF_open; //rt28xx_open;
dev->stop = MainVirtualIF_close; //rt28xx_close;
// dev->uninit = ieee80211_if_reinit;
// dev->destructor = ieee80211_if_free;
dev->priv_flags = INT_MAIN;
dev->do_ioctl = rt28xx_ioctl;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
dev->validate_addr = NULL;
#endif
dev->netdev_ops = &rt2870_netdev_ops;
// find available device name
for (i = 0; i < 8; i++)
{


@ -436,7 +436,6 @@ static int rt28xx_init(IN struct net_device *net_dev)
// OID_SET_HT_PHYMODE SetHT;
// WPDMA_GLO_CFG_STRUC GloCfg;
UINT32 MacCsr0 = 0;
UINT32 MacValue = 0;
#ifdef RT2870
#ifdef INF_AMAZON_SE
@ -849,6 +848,20 @@ int rt28xx_open(IN PNET_DEV dev)
return (-1);
} /* End of rt28xx_open */
static const struct net_device_ops rt3070_netdev_ops = {
.ndo_open = MainVirtualIF_open,
.ndo_stop = MainVirtualIF_close,
.ndo_do_ioctl = rt28xx_ioctl,
.ndo_get_stats = RT28xx_get_ether_stats,
.ndo_validate_addr = NULL,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu,
#ifdef IKANOS_VX_1X0
.ndo_start_xmit = IKANOS_DataFramesTx,
#else
.ndo_start_xmit = rt28xx_send_packets,
#endif
};
/* Must not be called for mdev and apdev */
static NDIS_STATUS rt_ieee80211_if_setup(struct net_device *dev, PRTMP_ADAPTER pAd)
@ -860,12 +873,6 @@ static NDIS_STATUS rt_ieee80211_if_setup(struct net_device *dev, PRTMP_ADAPTER p
//ether_setup(dev);
dev->hard_start_xmit = rt28xx_send_packets;
#ifdef IKANOS_VX_1X0
dev->hard_start_xmit = IKANOS_DataFramesTx;
#endif // IKANOS_VX_1X0 //
// dev->set_multicast_list = ieee80211_set_multicast_list;
// dev->change_mtu = ieee80211_change_mtu;
#ifdef CONFIG_STA_SUPPORT
@ -889,16 +896,10 @@ static NDIS_STATUS rt_ieee80211_if_setup(struct net_device *dev, PRTMP_ADAPTER p
#if WIRELESS_EXT < 21
dev->get_wireless_stats = rt28xx_get_wireless_stats;
#endif
dev->get_stats = RT28xx_get_ether_stats;
dev->open = MainVirtualIF_open; //rt28xx_open;
dev->stop = MainVirtualIF_close; //rt28xx_close;
// dev->uninit = ieee80211_if_reinit;
// dev->destructor = ieee80211_if_free;
dev->priv_flags = INT_MAIN;
dev->do_ioctl = rt28xx_ioctl;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
dev->validate_addr = NULL;
#endif
dev->netdev_ops = &rt3070_netdev_ops;
// find available device name
for (i = 0; i < 8; i++)
{


@ -10,7 +10,36 @@ TODO:
- move firmware loading to request_firmware()
- remove direct memory access of structures
- any remaining sparse and checkpatch.pl warnings
- any netdev recommended changes
- use net_device_ops
- use dev->stats rather than adapter->stats
- don't cast netdev_priv it is already void
- use compare_ether_addr
- GET RID OF MACROS
- work on all architectures
- without CONFIG_X86_64 confusion
- do 64 bit correctly
- don't depend on order of union
- get rid of ASSERT(), use BUG() instead but only where necessary
looks like most aren't really useful
- no new SIOCDEVPRIVATE ioctl allowed
- don't use module_param for configuring interrupt mitigation
use ethtool instead
- reorder code to eliminate use of forward declarations
- don't keep private linked list of drivers.
- remove all the gratuitous debug infrastructure
- use PCI_DEVICE()
- do ethtool correctly using ethtool_ops
- NAPI?
- wasted overhead of extra stats
- state variables for things that are
easily available and shouldn't be kept in card structure, cardnum, ...
slotnumber, events, ...
- get rid of slic_spinlock wrapper
- volatile == bad design => bad code
- locking too fine grained, not designed - just throw more locks
  at the problem
Please send patches to:
Greg Kroah-Hartman <gregkh@suse.de>
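
Several of the TODO items above are one-line idioms; a rough, hedged sketch of three of them
(PCI_DEVICE(), using dev->stats, and not casting netdev_priv()), with made-up example_* names and
placeholder PCI IDs rather than anything from the slicoss driver itself:

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Placeholder vendor/device IDs, not the real slicoss ones. */
static const struct pci_device_id example_pci_ids[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },
	{ }
};
MODULE_DEVICE_TABLE(pci, example_pci_ids);

struct example_priv {
	int placeholder;
};

static int example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* netdev_priv() already returns void *, so no cast is needed. */
	struct example_priv *priv = netdev_priv(dev);

	/* Count in the generic dev->stats instead of a private adapter->stats copy. */
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	(void)priv;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}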


@ -345,6 +345,19 @@ static void slic_init_adapter(struct net_device *netdev,
return;
}
static const struct net_device_ops slic_netdev_ops = {
.ndo_open = slic_entry_open,
.ndo_stop = slic_entry_halt,
.ndo_start_xmit = slic_xmit_start,
.ndo_do_ioctl = slic_ioctl,
.ndo_set_mac_address = slic_mac_set_address,
.ndo_get_stats = slic_get_stats,
.ndo_set_multicast_list = slic_mcast_set_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu,
};
static int __devinit slic_entry_probe(struct pci_dev *pcidev,
const struct pci_device_id *pci_tbl_entry)
{
@ -442,13 +455,7 @@ static int __devinit slic_entry_probe(struct pci_dev *pcidev,
netdev->base_addr = (unsigned long)adapter->memorybase;
netdev->irq = adapter->irq;
netdev->open = slic_entry_open;
netdev->stop = slic_entry_halt;
netdev->hard_start_xmit = slic_xmit_start;
netdev->do_ioctl = slic_ioctl;
netdev->set_mac_address = slic_mac_set_address;
netdev->get_stats = slic_get_stats;
netdev->set_multicast_list = slic_mcast_set_list;
netdev->netdev_ops = &slic_netdev_ops;
slic_debug_adapter_create(adapter);
@ -1260,7 +1267,7 @@ static int slic_mcast_add_list(struct adapter *adapter, char *address)
}
/* Doesn't already exist. Allocate a structure to hold it */
mcaddr = kmalloc(sizeof(struct mcast_address), GFP_KERNEL);
mcaddr = kmalloc(sizeof(struct mcast_address), GFP_ATOMIC);
if (mcaddr == NULL)
return 1;
@ -2284,7 +2291,7 @@ static u32 slic_card_locate(struct adapter *adapter)
}
if (!physcard) {
/* no structure allocated for this physical card yet */
physcard = kzalloc(sizeof(struct physcard), GFP_KERNEL);
physcard = kzalloc(sizeof(struct physcard), GFP_ATOMIC);
ASSERT(physcard);
physcard->next = slic_global.phys_card;


@ -1,6 +1,6 @@
config STLC45XX
tristate "stlc4550/4560 support"
depends on MAC80211 && WLAN_80211 && SPI_MASTER
depends on MAC80211 && WLAN_80211 && SPI_MASTER && GENERIC_HARDIRQS
---help---
This is a driver for stlc4550 and stlc4560 chipsets.


@ -322,6 +322,8 @@ int sxg_add_msi_isr(struct adapter_t *adapter)
int ret,i;
if (!adapter->intrregistered) {
spin_unlock_irqrestore(&sxg_global.driver_lock,
sxg_global.flags);
for (i=0; i<adapter->nr_msix_entries; i++) {
ret = request_irq (adapter->msi_entries[i].vector,
sxg_isr,
@ -329,6 +331,8 @@ int sxg_add_msi_isr(struct adapter_t *adapter)
adapter->netdev->name,
adapter->netdev);
if (ret) {
spin_lock_irqsave(&sxg_global.driver_lock,
sxg_global.flags);
DBG_ERROR("sxg: MSI-X request_irq (%s) "
"FAILED [%x]\n", adapter->netdev->name,
ret);
@ -336,6 +340,7 @@ int sxg_add_msi_isr(struct adapter_t *adapter)
}
}
}
spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags);
adapter->msi_enabled = TRUE;
adapter->intrregistered = 1;
adapter->IntRegistered = TRUE;
@ -896,6 +901,22 @@ static inline int sxg_read_config(struct adapter_t *adapter)
return status;
}
static const struct net_device_ops sxg_netdev_ops = {
.ndo_open = sxg_entry_open,
.ndo_stop = sxg_entry_halt,
.ndo_start_xmit = sxg_send_packets,
.ndo_do_ioctl = sxg_ioctl,
.ndo_change_mtu = sxg_change_mtu,
.ndo_get_stats = sxg_get_stats,
.ndo_set_multicast_list = sxg_mcast_set_list,
.ndo_validate_addr = eth_validate_addr,
#if XXXTODO
.ndo_set_mac_address = sxg_mac_set_address,
#else
.ndo_set_mac_address = eth_mac_addr,
#endif
};
static int sxg_entry_probe(struct pci_dev *pcidev,
const struct pci_device_id *pci_tbl_entry)
{
@ -1095,16 +1116,7 @@ static int sxg_entry_probe(struct pci_dev *pcidev,
netdev->base_addr = (unsigned long)adapter->base_addr;
netdev->irq = adapter->irq;
netdev->open = sxg_entry_open;
netdev->stop = sxg_entry_halt;
netdev->hard_start_xmit = sxg_send_packets;
netdev->do_ioctl = sxg_ioctl;
netdev->change_mtu = sxg_change_mtu;
#if XXXTODO
netdev->set_mac_address = sxg_mac_set_address;
#endif
netdev->get_stats = sxg_get_stats;
netdev->set_multicast_list = sxg_mcast_set_list;
netdev->netdev_ops = &sxg_netdev_ops;
SET_ETHTOOL_OPS(netdev, &sxg_nic_ethtool_ops);
netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
err = sxg_set_interrupt_capability(adapter);
@ -2247,6 +2259,8 @@ static int sxg_entry_open(struct net_device *dev)
DBG_ERROR("sxg: %s EXIT\n", __func__);
spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags);
mod_timer(&adapter->watchdog_timer, jiffies);
return STATUS_SUCCESS;
}
@ -2568,6 +2582,7 @@ static int sxg_dumb_sgl(struct sxg_x64_sgl *pSgl,
u64 phys_addr;
unsigned long flags;
unsigned long queue_id=0;
int offload_cksum = 0;
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbSgl",
pSgl, SxgSgl, 0, 0);
@ -2606,7 +2621,11 @@ static int sxg_dumb_sgl(struct sxg_x64_sgl *pSgl,
struct iphdr *ip;
ip = ip_hdr(skb);
if ((ip->protocol == IPPROTO_TCP)&&(DataLength >= sizeof(
if (ip->protocol == IPPROTO_TCP)
offload_cksum = 1;
if (!offload_cksum || !tcp_hdr(skb))
queue_id = 0;
else if (offload_cksum && (DataLength >= sizeof(
struct tcphdr))){
queue_id = ((ntohs(tcp_hdr(skb)->dest) == ISCSI_PORT) ?
(ntohs (tcp_hdr(skb)->source) &
@ -2615,8 +2634,11 @@ static int sxg_dumb_sgl(struct sxg_x64_sgl *pSgl,
SXG_LARGE_SEND_QUEUE_MASK));
}
} else if (skb->protocol == htons(ETH_P_IPV6)) {
if ((ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) && (DataLength >=
sizeof(struct tcphdr)) ) {
if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
offload_cksum = 1;
if (!offload_cksum || !tcp_hdr(skb))
queue_id = 0;
else if (offload_cksum && (DataLength>=sizeof(struct tcphdr))){
queue_id = ((ntohs(tcp_hdr(skb)->dest) == ISCSI_PORT) ?
(ntohs (tcp_hdr(skb)->source) &
SXG_LARGE_SEND_QUEUE_MASK):
@ -2645,23 +2667,38 @@ static int sxg_dumb_sgl(struct sxg_x64_sgl *pSgl,
}
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbCmd",
XmtCmd, XmtRingInfo->Head, XmtRingInfo->Tail, 0);
/* Update stats */
adapter->stats.tx_packets++;
adapter->stats.tx_bytes += DataLength;
#if XXXTODO /* Stats stuff */
if (SXG_MULTICAST_PACKET(EtherHdr)) {
if (SXG_BROADCAST_PACKET(EtherHdr)) {
adapter->Stats.DumbXmtBcastPkts++;
adapter->Stats.DumbXmtBcastBytes += DataLength;
memset(XmtCmd, '\0', sizeof(*XmtCmd));
XmtCmd->SgEntries = 1;
XmtCmd->Flags = 0;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
/*
* We need to set the Checkum in IP header to 0. This is
* required by hardware.
*/
if (offload_cksum) {
ip_hdr(skb)->check = 0x0;
XmtCmd->CsumFlags.Flags |= SXG_SLOWCMD_CSUM_IP;
XmtCmd->CsumFlags.Flags |= SXG_SLOWCMD_CSUM_TCP;
/*
* Dont know if length will require a change in
* case of VLAN
*/
XmtCmd->CsumFlags.MacLen = ETH_HLEN;
XmtCmd->CsumFlags.IpHl = skb_network_header_len(skb) >>
SXG_NW_HDR_LEN_SHIFT;
} else {
adapter->Stats.DumbXmtMcastPkts++;
adapter->Stats.DumbXmtMcastBytes += DataLength;
if (skb_checksum_help(skb)){
printk(KERN_EMERG "Dropped UDP packet for"
" incorrect checksum calculation\n");
if (XmtCmd)
SXG_ABORT_CMD(XmtRingInfo);
spin_unlock_irqrestore(&adapter->XmtZeroLock,
flags);
return STATUS_SUCCESS;
}
}
} else {
adapter->Stats.DumbXmtUcastPkts++;
adapter->Stats.DumbXmtUcastBytes += DataLength;
}
#endif
/*
* Fill in the command
* Copy out the first SGE to the command and adjust for offset
@ -2679,31 +2716,17 @@ static int sxg_dumb_sgl(struct sxg_x64_sgl *pSgl,
(SXG_INVALID_SGL(phys_addr,skb->data_len)))
{
spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
if (XmtCmd)
SXG_ABORT_CMD(XmtRingInfo);
/* Silently drop this packet */
printk(KERN_EMERG"Dropped a packet for 64k boundary problem\n");
return STATUS_SUCCESS;
}
memset(XmtCmd, '\0', sizeof(*XmtCmd));
XmtCmd->Buffer.FirstSgeAddress = phys_addr;
XmtCmd->Buffer.FirstSgeLength = DataLength;
XmtCmd->Buffer.SgeOffset = 0;
XmtCmd->Buffer.TotalLength = DataLength;
XmtCmd->SgEntries = 1;
XmtCmd->Flags = 0;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
/*
* We need to set the Checkum in IP header to 0. This is
* required by hardware.
*/
ip_hdr(skb)->check = 0x0;
XmtCmd->CsumFlags.Flags |= SXG_SLOWCMD_CSUM_IP;
XmtCmd->CsumFlags.Flags |= SXG_SLOWCMD_CSUM_TCP;
/* Dont know if length will require a change in case of VLAN */
XmtCmd->CsumFlags.MacLen = ETH_HLEN;
XmtCmd->CsumFlags.IpHl = skb_network_header_len(skb) >>
SXG_NW_HDR_LEN_SHIFT;
}
/*
* Advance transmit cmd descripter by 1.
* NOTE - See comments in SxgTcpOutput where we write
@ -2715,6 +2738,24 @@ static int sxg_dumb_sgl(struct sxg_x64_sgl *pSgl,
ASSERT((queue_id & ~SXG_LARGE_SEND_QUEUE_MASK) == 0);
WRITE_REG(adapter->UcodeRegs[0].XmtCmd, ((queue_id << 16) | 1), TRUE);
adapter->Stats.XmtQLen++; /* Stats within lock */
/* Update stats */
adapter->stats.tx_packets++;
adapter->stats.tx_bytes += DataLength;
#if XXXTODO /* Stats stuff */
if (SXG_MULTICAST_PACKET(EtherHdr)) {
if (SXG_BROADCAST_PACKET(EtherHdr)) {
adapter->Stats.DumbXmtBcastPkts++;
adapter->Stats.DumbXmtBcastBytes += DataLength;
} else {
adapter->Stats.DumbXmtMcastPkts++;
adapter->Stats.DumbXmtMcastBytes += DataLength;
}
} else {
adapter->Stats.DumbXmtUcastPkts++;
adapter->Stats.DumbXmtUcastBytes += DataLength;
}
#endif
spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDumSgl2",
XmtCmd, pSgl, SxgSgl, 0);


@ -603,10 +603,9 @@ static void ATEN2011_bulk_out_data_callback(struct urb *urb)
tty = tty_port_tty_get(&ATEN2011_port->port->port);
if (tty && ATEN2011_port->open) {
if (tty && ATEN2011_port->open)
/* tell the tty driver that something has changed */
wake_up_interruptible(&tty->write_wait);
}
tty_wakeup(tty);
/* schedule_work(&ATEN2011_port->port->work); */
tty_kref_put(tty);
@ -825,12 +824,6 @@ static int ATEN2011_open(struct tty_struct *tty, struct usb_serial_port *port,
status = 0;
status = set_reg_sync(port, ATEN2011_port->ControlRegOffset, Data);
/* force low_latency on so that our tty_push actually forces *
* the data through,otherwise it is scheduled, and with *
* high data rates (like with OHCI) data can get lost. */
if (tty)
tty->low_latency = 1;
/*
* Check to see if we've set up our endpoint info yet
* (can't set it up in ATEN2011_startup as the structures
@ -1473,22 +1466,7 @@ static void ATEN2011_set_termios(struct tty_struct *tty,
cflag = tty->termios->c_cflag;
if (!cflag) {
dbg("%s %s", __func__, "cflag is NULL");
return;
}
/* check that they really want us to change something */
if (old_termios) {
if ((cflag == old_termios->c_cflag) &&
(RELEVANT_IFLAG(tty->termios->c_iflag) ==
RELEVANT_IFLAG(old_termios->c_iflag))) {
dbg("%s", "Nothing to change");
return;
}
}
dbg("%s - clfag %08x iflag %08x", __func__,
dbg("%s - cflag %08x iflag %08x", __func__,
tty->termios->c_cflag, RELEVANT_IFLAG(tty->termios->c_iflag));
if (old_termios) {


@ -711,6 +711,20 @@ static int wlan_change_mtu(netdevice_t *dev, int new_mtu)
return 0;
}
static const struct net_device_ops p80211_netdev_ops = {
.ndo_init = p80211knetdev_init,
.ndo_open = p80211knetdev_open,
.ndo_stop = p80211knetdev_stop,
.ndo_get_stats = p80211knetdev_get_stats,
.ndo_start_xmit = p80211knetdev_hard_start_xmit,
.ndo_set_multicast_list = p80211knetdev_set_multicast_list,
.ndo_do_ioctl = p80211knetdev_do_ioctl,
.ndo_set_mac_address = p80211knetdev_set_mac_address,
.ndo_tx_timeout = p80211knetdev_tx_timeout,
.ndo_change_mtu = wlan_change_mtu,
.ndo_validate_addr = eth_validate_addr,
};
/*----------------------------------------------------------------
* wlan_setup
*
@ -756,11 +770,7 @@ int wlan_setup(wlandevice_t *wlandev)
} else {
wlandev->netdev = dev;
dev->ml_priv = wlandev;
dev->hard_start_xmit = p80211knetdev_hard_start_xmit;
dev->get_stats = p80211knetdev_get_stats;
dev->init = p80211knetdev_init;
dev->open = p80211knetdev_open;
dev->stop = p80211knetdev_stop;
dev->netdev_ops = &p80211_netdev_ops;
mutex_init(&wlandev->ioctl_lock);
/* block ioctls until fully initialised. Don't forget to call