firewire: Split the iso buffer out from fw_iso_context and avoid vmalloc.

This patch splits out the iso buffer so we can initialize it at mmap
time with the size provided in the mmap call.  Furthermore, allocate
the backing pages using alloc_page to avoid setting up kernel side
virtual memory mappings for the pages.

Signed-off-by: Kristian Høgsberg <krh@redhat.com>
Signed-off-by: Stefan Richter <stefanr@s5r6.in-berlin.de>
This commit is contained in:
Kristian Høgsberg 2007-02-16 17:34:38 -05:00 committed by Stefan Richter
parent 6e2e8424d3
commit 9aad812538
5 changed files with 148 additions and 81 deletions

View File

@ -71,8 +71,10 @@ struct client {
struct list_head event_list; struct list_head event_list;
struct semaphore event_list_sem; struct semaphore event_list_sem;
wait_queue_head_t wait; wait_queue_head_t wait;
unsigned long vm_start;
struct fw_iso_context *iso_context; struct fw_iso_context *iso_context;
struct fw_iso_buffer buffer;
unsigned long vm_start;
}; };
static inline void __user * static inline void __user *
@ -406,7 +408,6 @@ static int ioctl_create_iso_context(struct client *client, void __user *arg)
client->iso_context = fw_iso_context_create(client->device->card, client->iso_context = fw_iso_context_create(client->device->card,
FW_ISO_CONTEXT_TRANSMIT, FW_ISO_CONTEXT_TRANSMIT,
request.buffer_size,
iso_callback, client); iso_callback, client);
if (IS_ERR(client->iso_context)) if (IS_ERR(client->iso_context))
return PTR_ERR(client->iso_context); return PTR_ERR(client->iso_context);
@ -418,8 +419,7 @@ static int ioctl_queue_iso(struct client *client, void __user *arg)
{ {
struct fw_cdev_queue_iso request; struct fw_cdev_queue_iso request;
struct fw_cdev_iso_packet __user *p, *end, *next; struct fw_cdev_iso_packet __user *p, *end, *next;
void *payload, *payload_end; unsigned long payload, payload_end;
unsigned long index;
int count; int count;
struct { struct {
struct fw_iso_packet packet; struct fw_iso_packet packet;
@ -434,20 +434,17 @@ static int ioctl_queue_iso(struct client *client, void __user *arg)
/* If the user passes a non-NULL data pointer, has mmap()'ed /* If the user passes a non-NULL data pointer, has mmap()'ed
* the iso buffer, and the pointer points inside the buffer, * the iso buffer, and the pointer points inside the buffer,
* we setup the payload pointers accordingly. Otherwise we * we setup the payload pointers accordingly. Otherwise we
* set them both to NULL, which will still let packets with * set them both to 0, which will still let packets with
* payload_length == 0 through. In other words, if no packets * payload_length == 0 through. In other words, if no packets
* use the indirect payload, the iso buffer need not be mapped * use the indirect payload, the iso buffer need not be mapped
* and the request.data pointer is ignored.*/ * and the request.data pointer is ignored.*/
index = (unsigned long)request.data - client->vm_start; payload = (unsigned long)request.data - client->vm_start;
if (request.data != 0 && client->vm_start != 0 && payload_end = payload + (client->buffer.page_count << PAGE_SHIFT);
index <= client->iso_context->buffer_size) { if (request.data == 0 || client->buffer.pages == NULL ||
payload = client->iso_context->buffer + index; payload >= payload_end) {
payload_end = client->iso_context->buffer + payload = 0;
client->iso_context->buffer_size; payload_end = 0;
} else {
payload = NULL;
payload_end = NULL;
} }
if (!access_ok(VERIFY_READ, request.packets, request.size)) if (!access_ok(VERIFY_READ, request.packets, request.size))
@ -473,7 +470,7 @@ static int ioctl_queue_iso(struct client *client, void __user *arg)
return -EINVAL; return -EINVAL;
if (fw_iso_context_queue(client->iso_context, if (fw_iso_context_queue(client->iso_context,
&u.packet, payload)) &u.packet, &client->buffer, payload))
break; break;
p = next; p = next;
@ -483,8 +480,7 @@ static int ioctl_queue_iso(struct client *client, void __user *arg)
request.size -= uptr_to_u64(p) - request.packets; request.size -= uptr_to_u64(p) - request.packets;
request.packets = uptr_to_u64(p); request.packets = uptr_to_u64(p);
request.data = request.data = client->vm_start + payload;
client->vm_start + (payload - client->iso_context->buffer);
if (copy_to_user(arg, &request, sizeof request)) if (copy_to_user(arg, &request, sizeof request))
return -EFAULT; return -EFAULT;
@ -549,13 +545,41 @@ fw_device_op_compat_ioctl(struct file *file,
static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma) static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
{ {
struct client *client = file->private_data; struct client *client = file->private_data;
enum dma_data_direction direction;
unsigned long size;
int page_count, retval;
if (client->iso_context->buffer == NULL) /* FIXME: We could support multiple buffers, but we don't. */
if (client->buffer.pages != NULL)
return -EBUSY;
if (!(vma->vm_flags & VM_SHARED))
return -EINVAL;
if (vma->vm_start & ~PAGE_MASK)
return -EINVAL; return -EINVAL;
client->vm_start = vma->vm_start; client->vm_start = vma->vm_start;
size = vma->vm_end - vma->vm_start;
page_count = size >> PAGE_SHIFT;
if (size & ~PAGE_MASK)
return -EINVAL;
return remap_vmalloc_range(vma, client->iso_context->buffer, 0); if (vma->vm_flags & VM_WRITE)
direction = DMA_TO_DEVICE;
else
direction = DMA_FROM_DEVICE;
retval = fw_iso_buffer_init(&client->buffer, client->device->card,
page_count, direction);
if (retval < 0)
return retval;
retval = fw_iso_buffer_map(&client->buffer, vma);
if (retval < 0)
fw_iso_buffer_destroy(&client->buffer, client->device->card);
return retval;
} }
static int fw_device_op_release(struct inode *inode, struct file *file) static int fw_device_op_release(struct inode *inode, struct file *file)
@ -564,6 +588,9 @@ static int fw_device_op_release(struct inode *inode, struct file *file)
struct address_handler *h, *next; struct address_handler *h, *next;
struct request *r, *next_r; struct request *r, *next_r;
if (client->buffer.pages)
fw_iso_buffer_destroy(&client->buffer, client->device->card);
if (client->iso_context) if (client->iso_context)
fw_iso_context_destroy(client->iso_context); fw_iso_context_destroy(client->iso_context);

View File

@ -126,7 +126,7 @@ struct fw_cdev_allocate {
}; };
struct fw_cdev_create_iso_context { struct fw_cdev_create_iso_context {
__u32 buffer_size; __u32 handle;
}; };
struct fw_cdev_iso_packet { struct fw_cdev_iso_packet {

View File

@ -28,68 +28,88 @@
#include "fw-topology.h" #include "fw-topology.h"
#include "fw-device.h" #include "fw-device.h"
static int int
setup_iso_buffer(struct fw_iso_context *ctx, size_t size, fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
enum dma_data_direction direction) int page_count, enum dma_data_direction direction)
{ {
struct page *page; int i, j, retval = -ENOMEM;
int i, j; dma_addr_t address;
void *p;
ctx->buffer_size = PAGE_ALIGN(size); buffer->page_count = page_count;
if (size == 0) buffer->direction = direction;
return 0;
ctx->buffer = vmalloc_32_user(ctx->buffer_size); buffer->pages = kmalloc(page_count * sizeof(buffer->pages[0]),
if (ctx->buffer == NULL) GFP_KERNEL);
goto fail_buffer_alloc; if (buffer->pages == NULL)
goto out;
ctx->page_count = ctx->buffer_size >> PAGE_SHIFT; for (i = 0; i < buffer->page_count; i++) {
ctx->pages = buffer->pages[i] = alloc_page(GFP_KERNEL | GFP_DMA32);
kzalloc(ctx->page_count * sizeof(ctx->pages[0]), GFP_KERNEL); if (buffer->pages[i] == NULL)
if (ctx->pages == NULL) goto out_pages;
goto fail_pages_alloc;
address = dma_map_page(card->device, buffer->pages[i],
p = ctx->buffer; 0, PAGE_SIZE, direction);
for (i = 0; i < ctx->page_count; i++, p += PAGE_SIZE) { if (dma_mapping_error(address)) {
page = vmalloc_to_page(p); __free_page(buffer->pages[i]);
ctx->pages[i] = dma_map_page(ctx->card->device, goto out_pages;
page, 0, PAGE_SIZE, direction); }
if (dma_mapping_error(ctx->pages[i])) set_page_private(buffer->pages[i], address);
goto fail_mapping;
} }
return 0; return 0;
fail_mapping: out_pages:
for (j = 0; j < i; j++) for (j = 0; j < i; j++) {
dma_unmap_page(ctx->card->device, ctx->pages[j], address = page_private(buffer->pages[j]);
dma_unmap_page(card->device, address,
PAGE_SIZE, DMA_TO_DEVICE); PAGE_SIZE, DMA_TO_DEVICE);
fail_pages_alloc: __free_page(buffer->pages[j]);
vfree(ctx->buffer); }
fail_buffer_alloc: kfree(buffer->pages);
return -ENOMEM; out:
buffer->pages = NULL;
return retval;
} }
static void destroy_iso_buffer(struct fw_iso_context *ctx) int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma)
{
unsigned long uaddr;
int i, retval;
uaddr = vma->vm_start;
for (i = 0; i < buffer->page_count; i++) {
retval = vm_insert_page(vma, uaddr, buffer->pages[i]);
if (retval)
return retval;
uaddr += PAGE_SIZE;
}
return 0;
}
void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer,
struct fw_card *card)
{ {
int i; int i;
dma_addr_t address;
for (i = 0; i < ctx->page_count; i++) for (i = 0; i < buffer->page_count; i++) {
dma_unmap_page(ctx->card->device, ctx->pages[i], address = page_private(buffer->pages[i]);
dma_unmap_page(card->device, address,
PAGE_SIZE, DMA_TO_DEVICE); PAGE_SIZE, DMA_TO_DEVICE);
__free_page(buffer->pages[i]);
}
kfree(ctx->pages); kfree(buffer->pages);
vfree(ctx->buffer); buffer->pages = NULL;
} }
struct fw_iso_context *fw_iso_context_create(struct fw_card *card, int type, struct fw_iso_context *fw_iso_context_create(struct fw_card *card, int type,
size_t buffer_size,
fw_iso_callback_t callback, fw_iso_callback_t callback,
void *callback_data) void *callback_data)
{ {
struct fw_iso_context *ctx; struct fw_iso_context *ctx;
int retval;
ctx = card->driver->allocate_iso_context(card, type); ctx = card->driver->allocate_iso_context(card, type);
if (IS_ERR(ctx)) if (IS_ERR(ctx))
@ -100,12 +120,6 @@ struct fw_iso_context *fw_iso_context_create(struct fw_card *card, int type,
ctx->callback = callback; ctx->callback = callback;
ctx->callback_data = callback_data; ctx->callback_data = callback_data;
retval = setup_iso_buffer(ctx, buffer_size, DMA_TO_DEVICE);
if (retval < 0) {
card->driver->free_iso_context(ctx);
return ERR_PTR(retval);
}
return ctx; return ctx;
} }
EXPORT_SYMBOL(fw_iso_context_create); EXPORT_SYMBOL(fw_iso_context_create);
@ -114,8 +128,6 @@ void fw_iso_context_destroy(struct fw_iso_context *ctx)
{ {
struct fw_card *card = ctx->card; struct fw_card *card = ctx->card;
destroy_iso_buffer(ctx);
card->driver->free_iso_context(ctx); card->driver->free_iso_context(ctx);
} }
EXPORT_SYMBOL(fw_iso_context_destroy); EXPORT_SYMBOL(fw_iso_context_destroy);
@ -133,10 +145,12 @@ EXPORT_SYMBOL(fw_iso_context_send);
int int
fw_iso_context_queue(struct fw_iso_context *ctx, fw_iso_context_queue(struct fw_iso_context *ctx,
struct fw_iso_packet *packet, void *payload) struct fw_iso_packet *packet,
struct fw_iso_buffer *buffer,
unsigned long payload)
{ {
struct fw_card *card = ctx->card; struct fw_card *card = ctx->card;
return card->driver->queue_iso(ctx, packet, payload); return card->driver->queue_iso(ctx, packet, buffer, payload);
} }
EXPORT_SYMBOL(fw_iso_context_queue); EXPORT_SYMBOL(fw_iso_context_queue);

View File

@ -1251,14 +1251,16 @@ static void ohci_free_iso_context(struct fw_iso_context *base)
static int static int
ohci_queue_iso(struct fw_iso_context *base, ohci_queue_iso(struct fw_iso_context *base,
struct fw_iso_packet *packet, void *payload) struct fw_iso_packet *packet,
struct fw_iso_buffer *buffer,
unsigned long payload)
{ {
struct iso_context *ctx = (struct iso_context *)base; struct iso_context *ctx = (struct iso_context *)base;
struct fw_ohci *ohci = fw_ohci(ctx->base.card); struct fw_ohci *ohci = fw_ohci(ctx->base.card);
struct descriptor *d, *end, *last, *tail, *pd; struct descriptor *d, *end, *last, *tail, *pd;
struct fw_iso_packet *p; struct fw_iso_packet *p;
__le32 *header; __le32 *header;
dma_addr_t d_bus; dma_addr_t d_bus, page_bus;
u32 z, header_z, payload_z, irq; u32 z, header_z, payload_z, irq;
u32 payload_index, payload_end_index, next_page_index; u32 payload_index, payload_end_index, next_page_index;
int index, page, end_page, i, length, offset; int index, page, end_page, i, length, offset;
@ -1267,7 +1269,7 @@ ohci_queue_iso(struct fw_iso_context *base,
* packet, retransmit or terminate.. */ * packet, retransmit or terminate.. */
p = packet; p = packet;
payload_index = payload - ctx->base.buffer; payload_index = payload;
d = ctx->head_descriptor; d = ctx->head_descriptor;
tail = ctx->tail_descriptor; tail = ctx->tail_descriptor;
end = ctx->buffer + ISO_BUFFER_SIZE / sizeof(struct descriptor); end = ctx->buffer + ISO_BUFFER_SIZE / sizeof(struct descriptor);
@ -1337,7 +1339,9 @@ ohci_queue_iso(struct fw_iso_context *base,
length = length =
min(next_page_index, payload_end_index) - payload_index; min(next_page_index, payload_end_index) - payload_index;
pd[i].req_count = cpu_to_le16(length); pd[i].req_count = cpu_to_le16(length);
pd[i].data_address = cpu_to_le32(ctx->base.pages[page] + offset);
page_bus = page_private(buffer->pages[page]);
pd[i].data_address = cpu_to_le32(page_bus + offset);
payload_index += length; payload_index += length;
} }

View File

@ -27,6 +27,7 @@
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/list.h> #include <linux/list.h>
#include <linux/fs.h> #include <linux/fs.h>
#include <linux/dma-mapping.h>
#define TCODE_WRITE_QUADLET_REQUEST 0 #define TCODE_WRITE_QUADLET_REQUEST 0
#define TCODE_WRITE_BLOCK_REQUEST 1 #define TCODE_WRITE_BLOCK_REQUEST 1
@ -336,6 +337,18 @@ struct fw_iso_context;
typedef void (*fw_iso_callback_t) (struct fw_iso_context *context, typedef void (*fw_iso_callback_t) (struct fw_iso_context *context,
int status, u32 cycle, void *data); int status, u32 cycle, void *data);
/* An iso buffer is a set of pages allocated and DMA-mapped in the
 * specified direction.  The pages are deliberately not mapped into
 * the kernel virtual address space; each page's DMA address is
 * stashed in its page_private() slot instead.  fw_iso_buffer_map()
 * inserts the pages into a user vma at mmap time. */
struct fw_iso_buffer {
	enum dma_data_direction direction;
	struct page **pages;
	int page_count;
};
struct fw_iso_context { struct fw_iso_context {
struct fw_card *card; struct fw_card *card;
int type; int type;
@ -343,19 +356,24 @@ struct fw_iso_context {
int speed; int speed;
fw_iso_callback_t callback; fw_iso_callback_t callback;
void *callback_data; void *callback_data;
void *buffer;
size_t buffer_size;
dma_addr_t *pages;
int page_count;
}; };
int
fw_iso_buffer_init(struct fw_iso_buffer *buffer,
struct fw_card *card,
int page_count,
enum dma_data_direction direction);
int
fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma);
void
fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card);
struct fw_iso_context * struct fw_iso_context *
fw_iso_context_create(struct fw_card *card, int type, fw_iso_context_create(struct fw_card *card, int type,
size_t buffer_size,
fw_iso_callback_t callback, fw_iso_callback_t callback,
void *callback_data); void *callback_data);
void void
fw_iso_context_destroy(struct fw_iso_context *ctx); fw_iso_context_destroy(struct fw_iso_context *ctx);
@ -365,7 +383,9 @@ fw_iso_context_start(struct fw_iso_context *ctx,
int int
fw_iso_context_queue(struct fw_iso_context *ctx, fw_iso_context_queue(struct fw_iso_context *ctx,
struct fw_iso_packet *packet, void *payload); struct fw_iso_packet *packet,
struct fw_iso_buffer *buffer,
unsigned long payload);
int int
fw_iso_context_send(struct fw_iso_context *ctx, fw_iso_context_send(struct fw_iso_context *ctx,
@ -410,7 +430,9 @@ struct fw_card_driver {
int (*send_iso)(struct fw_iso_context *ctx, s32 cycle); int (*send_iso)(struct fw_iso_context *ctx, s32 cycle);
int (*queue_iso)(struct fw_iso_context *ctx, int (*queue_iso)(struct fw_iso_context *ctx,
struct fw_iso_packet *packet, void *payload); struct fw_iso_packet *packet,
struct fw_iso_buffer *buffer,
unsigned long payload);
}; };
int int