Merge branch 'staging-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging-2.6
* 'staging-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging-2.6: (28 commits)
  staging: usbip: bugfix for isochronous packets and optimization
  staging: usbip: bugfix add number of packets for isochronous frames
  staging: usbip: bugfixes related to kthread conversion
  staging: usbip: fix shutdown problems.
  staging: hv: Fix GARP not sent after Quick Migration
  staging: IIO: IMU: ADIS16400: Avoid using printk facility directly
  staging: IIO: IMU: ADIS16400: Fix product ID check, skip embedded revision number
  staging: IIO: IMU: ADIS16400: Make sure only enabled scan_elements are pushed into the ring
  staging: IIO: IMU: ADIS16400: Fix addresses of GYRO and ACCEL calibration offset
  staging: IIO: IMU: ADIS16400: Add delay after self test
  staging: IIO: IMU: ADIS16400: Fix up SPI messages cs_change behavior
  staging/rtl81*: build as loadable modules only
  staging: brcm80211: removed 'is_amsdu causing toss' log spam
  staging: brcm80211: fix for 'Short CCK' log spam
  staging: brcm80211: fix for 'AC_BE txop..' logs spammed problem
  staging: memrar: remove driver from tree
  staging: sep: remove last memrar remnants
  staging: fix hv_mouse build, needs delay.h
  staging: fix olpc_dcon build errors
  staging: sm7xx: fixed defines
  ...

Fix up trivial conflict in drivers/staging/memrar/memrar_handler.c (deleted vs trivial spelling fixes)
commit df9b29d13e
@@ -117,8 +117,6 @@ source "drivers/staging/hv/Kconfig"

source "drivers/staging/vme/Kconfig"

source "drivers/staging/memrar/Kconfig"

source "drivers/staging/sep/Kconfig"

source "drivers/staging/iio/Kconfig"
@@ -40,7 +40,6 @@ obj-$(CONFIG_VT6655) += vt6655/
obj-$(CONFIG_VT6656) += vt6656/
obj-$(CONFIG_HYPERV) += hv/
obj-$(CONFIG_VME_BUS) += vme/
obj-$(CONFIG_MRST_RAR_HANDLER) += memrar/
obj-$(CONFIG_DX_SEP) += sep/
obj-$(CONFIG_IIO) += iio/
obj-$(CONFIG_CS5535_GPIO) += cs5535_gpio/
@@ -6283,7 +6283,7 @@ wlc_d11hdrs_mac80211(struct wlc_info *wlc, struct ieee80211_hw *hw,
((preamble_type[1] == WLC_MM_PREAMBLE) ==
(txh->MModeFbrLen != 0)));

ac = wme_fifo2ac[queue];
ac = skb_get_queue_mapping(p);
if (SCB_WME(scb) && qos && wlc->edcf_txop[ac]) {
uint frag_dur, dur, dur_fallback;
@@ -6919,8 +6919,7 @@ prep_mac80211_status(struct wlc_info *wlc, d11rxhdr_t *rxh, struct sk_buff *p,
preamble = 0;
if (IS_CCK(rspec)) {
if (rxh->PhyRxStatus_0 & PRXS0_SHORTH)
WL_ERROR("Short CCK\n");
rx_status->flag |= RX_FLAG_SHORTPRE;
rx_status->flag |= RX_FLAG_SHORTPRE;
} else if (IS_OFDM(rspec)) {
rx_status->flag |= RX_FLAG_SHORTPRE;
} else {
@@ -7079,10 +7078,8 @@ void BCMFASTPATH wlc_recv(struct wlc_info *wlc, struct sk_buff *p)
if (ieee80211_is_probe_req(h->frame_control))
goto toss;

if (is_amsdu) {
WL_ERROR("%s: is_amsdu causing toss\n", __func__);
if (is_amsdu)
goto toss;
}

wlc_recvctl(wlc, rxh, p);
return;
@ -95,47 +95,47 @@ void put_request_value(struct net_device *dev, long lvalue);
|
|||
USHORT hdr_checksum(PPSEUDO_HDR pHdr);
|
||||
|
||||
typedef struct _DSP_FILE_HDR {
|
||||
long build_date;
|
||||
long dsp_coff_date;
|
||||
long loader_code_address;
|
||||
long loader_code_size;
|
||||
long loader_code_end;
|
||||
long dsp_code_address;
|
||||
long dsp_code_size;
|
||||
long dsp_code_end;
|
||||
long reserved[8];
|
||||
u32 build_date;
|
||||
u32 dsp_coff_date;
|
||||
u32 loader_code_address;
|
||||
u32 loader_code_size;
|
||||
u32 loader_code_end;
|
||||
u32 dsp_code_address;
|
||||
u32 dsp_code_size;
|
||||
u32 dsp_code_end;
|
||||
u32 reserved[8];
|
||||
} __attribute__ ((packed)) DSP_FILE_HDR, *PDSP_FILE_HDR;
|
||||
|
||||
typedef struct _DSP_FILE_HDR_5 {
|
||||
long version_id; // Version ID of this image format.
|
||||
long package_id; // Package ID of code release.
|
||||
long build_date; // Date/time stamp when file was built.
|
||||
long commands_offset; // Offset to attached commands in Pseudo Hdr format.
|
||||
long loader_offset; // Offset to bootloader code.
|
||||
long loader_code_address; // Start address of bootloader.
|
||||
long loader_code_end; // Where bootloader code ends.
|
||||
long loader_code_size;
|
||||
long version_data_offset; // Offset were scrambled version data begins.
|
||||
long version_data_size; // Size, in words, of scrambled version data.
|
||||
long nDspImages; // Number of DSP images in file.
|
||||
u32 version_id; // Version ID of this image format.
|
||||
u32 package_id; // Package ID of code release.
|
||||
u32 build_date; // Date/time stamp when file was built.
|
||||
u32 commands_offset; // Offset to attached commands in Pseudo Hdr format.
|
||||
u32 loader_offset; // Offset to bootloader code.
|
||||
u32 loader_code_address; // Start address of bootloader.
|
||||
u32 loader_code_end; // Where bootloader code ends.
|
||||
u32 loader_code_size;
|
||||
u32 version_data_offset; // Offset were scrambled version data begins.
|
||||
u32 version_data_size; // Size, in words, of scrambled version data.
|
||||
u32 nDspImages; // Number of DSP images in file.
|
||||
} __attribute__ ((packed)) DSP_FILE_HDR_5, *PDSP_FILE_HDR_5;
|
||||
|
||||
typedef struct _DSP_IMAGE_INFO {
|
||||
long coff_date; // Date/time when DSP Coff image was built.
|
||||
long begin_offset; // Offset in file where image begins.
|
||||
long end_offset; // Offset in file where image begins.
|
||||
long run_address; // On chip Start address of DSP code.
|
||||
long image_size; // Size of image.
|
||||
long version; // Embedded version # of DSP code.
|
||||
u32 coff_date; // Date/time when DSP Coff image was built.
|
||||
u32 begin_offset; // Offset in file where image begins.
|
||||
u32 end_offset; // Offset in file where image begins.
|
||||
u32 run_address; // On chip Start address of DSP code.
|
||||
u32 image_size; // Size of image.
|
||||
u32 version; // Embedded version # of DSP code.
|
||||
} __attribute__ ((packed)) DSP_IMAGE_INFO, *PDSP_IMAGE_INFO;
|
||||
|
||||
typedef struct _DSP_IMAGE_INFO_V6 {
|
||||
long coff_date; // Date/time when DSP Coff image was built.
|
||||
long begin_offset; // Offset in file where image begins.
|
||||
long end_offset; // Offset in file where image begins.
|
||||
long run_address; // On chip Start address of DSP code.
|
||||
long image_size; // Size of image.
|
||||
long version; // Embedded version # of DSP code.
|
||||
u32 coff_date; // Date/time when DSP Coff image was built.
|
||||
u32 begin_offset; // Offset in file where image begins.
|
||||
u32 end_offset; // Offset in file where image begins.
|
||||
u32 run_address; // On chip Start address of DSP code.
|
||||
u32 image_size; // Size of image.
|
||||
u32 version; // Embedded version # of DSP code.
|
||||
unsigned short checksum; // Dsp File checksum
|
||||
unsigned short pad1;
|
||||
} __attribute__ ((packed)) DSP_IMAGE_INFO_V6, *PDSP_IMAGE_INFO_V6;
|
||||
|
@ -846,8 +846,8 @@ int card_download(struct net_device *dev, const u8 *pFileStart, UINT FileLength)
|
|||
break;
|
||||
|
||||
case STATE_DONE_DWNLD:
|
||||
if (((UINT) (pUcFile) - (UINT) pFileStart) >=
|
||||
(UINT) FileLength) {
|
||||
if (((unsigned long) (pUcFile) - (unsigned long) pFileStart) >=
|
||||
(unsigned long) FileLength) {
|
||||
uiState = STATE_DONE_FILE;
|
||||
break;
|
||||
}
|
||||
|
@ -901,11 +901,11 @@ int card_download(struct net_device *dev, const u8 *pFileStart, UINT FileLength)
|
|||
&info->prov_list);
|
||||
// Move to next entry if available
|
||||
pUcFile =
|
||||
(UCHAR *) ((UINT) pUcFile +
|
||||
(UINT) ((usHdrLength + 1) & 0xFFFFFFFE) + sizeof(PSEUDO_HDR));
|
||||
if ((UINT) (pUcFile) -
|
||||
(UINT) (pFileStart) >=
|
||||
(UINT) FileLength) {
|
||||
(UCHAR *) ((unsigned long) pUcFile +
|
||||
(unsigned long) ((usHdrLength + 1) & 0xFFFFFFFE) + sizeof(PSEUDO_HDR));
|
||||
if ((unsigned long) (pUcFile) -
|
||||
(unsigned long) (pFileStart) >=
|
||||
(unsigned long) FileLength) {
|
||||
uiState =
|
||||
STATE_DONE_FILE;
|
||||
}
|
||||
|
|
|
@@ -81,14 +81,14 @@ static void vmbus_setevent(struct vmbus_channel *channel)

if (channel->offermsg.monitor_allocated) {
/* Each u32 represents 32 channels */
set_bit(channel->offermsg.child_relid & 31,
sync_set_bit(channel->offermsg.child_relid & 31,
(unsigned long *) vmbus_connection.send_int_page +
(channel->offermsg.child_relid >> 5));

monitorpage = vmbus_connection.monitor_pages;
monitorpage++; /* Get the child to parent monitor page */

set_bit(channel->monitor_bit,
sync_set_bit(channel->monitor_bit,
(unsigned long *)&monitorpage->trigger_group
[channel->monitor_grp].pending);
@@ -104,7 +104,7 @@ static void VmbusChannelClearEvent(struct vmbus_channel *channel)

if (Channel->offermsg.monitor_allocated) {
/* Each u32 represents 32 channels */
clear_bit(Channel->offermsg.child_relid & 31,
sync_clear_bit(Channel->offermsg.child_relid & 31,
(unsigned long *)vmbus_connection.send_int_page +
(Channel->offermsg.child_relid >> 5));
@@ -112,7 +112,7 @@ static void VmbusChannelClearEvent(struct vmbus_channel *channel)
vmbus_connection.monitor_pages;
monitorPage++; /* Get the child to parent monitor page */

clear_bit(Channel->monitor_bit,
sync_clear_bit(Channel->monitor_bit,
(unsigned long *)&monitorPage->trigger_group
[Channel->monitor_grp].Pending);
}
@@ -296,7 +296,7 @@ void vmbus_on_event(unsigned long data)
for (dword = 0; dword < maxdword; dword++) {
if (recv_int_page[dword]) {
for (bit = 0; bit < 32; bit++) {
if (test_and_clear_bit(bit,
if (sync_test_and_clear_bit(bit,
(unsigned long *)
&recv_int_page[dword])) {
relid = (dword << 5) + bit;
@@ -338,7 +338,7 @@ int vmbus_post_msg(void *buffer, size_t buflen)
int vmbus_set_event(u32 child_relid)
{
/* Each u32 represents 32 channels */
set_bit(child_relid & 31,
sync_set_bit(child_relid & 31,
(unsigned long *)vmbus_connection.send_int_page +
(child_relid >> 5));
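Editorial sketch (not part of the patch): the hunks above swap set_bit()/test_and_clear_bit() for their sync_ counterparts. On x86, the plain bitops may be compiled without a LOCK prefix on uniprocessor kernels, while the sync_ helpers in <asm/sync_bitops.h> are always locked, which matters for the interrupt page shared with the hypervisor. The example_* names and the int_page/relid parameters below are hypothetical stand-ins, not driver code.

#include <linux/types.h>
#include <asm/sync_bitops.h>

/* Signal channel "relid" in the interrupt page shared with the host. */
static void example_signal_channel(unsigned long *int_page, u32 relid)
{
	/* Each u32 in the shared page represents 32 channels. */
	sync_set_bit(relid & 31, int_page + (relid >> 5));
}

/* Consume a pending event for channel "relid"; nonzero if it was set. */
static int example_test_channel(unsigned long *int_page, u32 relid)
{
	return sync_test_and_clear_bit(relid & 31, int_page + (relid >> 5));
}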
@@ -14,6 +14,7 @@
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
@@ -375,7 +376,7 @@ static void MousevscOnReceiveDeviceInfo(struct mousevsc_dev *InputDevice, struct
desc->desc[0].wDescriptorLength);

/* Send the ack */
memset(&ack, sizeof(struct mousevsc_prt_msg), 0);
memset(&ack, 0, sizeof(struct mousevsc_prt_msg));

ack.type = PipeMessageData;
ack.size = sizeof(struct synthhid_device_info_ack);
@@ -596,7 +597,7 @@ static int MousevscConnectToVsp(struct hv_device *Device)
/*
* Now, initiate the vsc/vsp initialization protocol on the open channel
*/
memset(request, sizeof(struct mousevsc_prt_msg), 0);
memset(request, 0, sizeof(struct mousevsc_prt_msg));

request->type = PipeMessageData;
request->size = sizeof(struct synthhid_protocol_request);
@@ -46,6 +46,7 @@ struct net_device_context {
/* point back to our device context */
struct hv_device *device_ctx;
unsigned long avail;
struct work_struct work;
};

@@ -219,6 +220,7 @@ static void netvsc_linkstatus_callback(struct hv_device *device_obj,
unsigned int status)
{
struct net_device *net = dev_get_drvdata(&device_obj->device);
struct net_device_context *ndev_ctx;

if (!net) {
DPRINT_ERR(NETVSC_DRV, "got link status but net device "
@@ -230,6 +232,8 @@ static void netvsc_linkstatus_callback(struct hv_device *device_obj,
netif_carrier_on(net);
netif_wake_queue(net);
netif_notify_peers(net);
ndev_ctx = netdev_priv(net);
schedule_work(&ndev_ctx->work);
} else {
netif_carrier_off(net);
netif_stop_queue(net);
@@ -328,6 +332,25 @@ static const struct net_device_ops device_ops = {
.ndo_set_mac_address = eth_mac_addr,
};

/*
* Send GARP packet to network peers after migrations.
* After Quick Migration, the network is not immediately operational in the
* current context when receiving RNDIS_STATUS_MEDIA_CONNECT event. So, add
* another netif_notify_peers() into a scheduled work, otherwise GARP packet
* will not be sent after quick migration, and cause network disconnection.
*/
static void netvsc_send_garp(struct work_struct *w)
{
struct net_device_context *ndev_ctx;
struct net_device *net;

msleep(20);
ndev_ctx = container_of(w, struct net_device_context, work);
net = dev_get_drvdata(&ndev_ctx->device_ctx->device);
netif_notify_peers(net);
}


static int netvsc_probe(struct device *device)
{
struct hv_driver *drv =
@@ -353,6 +376,7 @@ static int netvsc_probe(struct device *device)
net_device_ctx->device_ctx = device_obj;
net_device_ctx->avail = ring_size;
dev_set_drvdata(device, net);
INIT_WORK(&net_device_ctx->work, netvsc_send_garp);

/* Notify the netvsc driver of the new device */
ret = net_drv_obj->base.dev_add(device_obj, &device_info);
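Editorial sketch: the GARP fix above is spread over three hunks; condensed in one place, the pattern is that the link-up callback (which must not sleep) only schedules a work item, and the work handler waits briefly before re-announcing the NIC so the gratuitous ARP is sent once the link is really usable after Quick Migration. The example_* names below are hypothetical, not the driver's.

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <linux/delay.h>

struct example_net_ctx {
	struct net_device *net;
	struct work_struct work;
};

static void example_send_garp(struct work_struct *w)
{
	struct example_net_ctx *ctx =
		container_of(w, struct example_net_ctx, work);

	msleep(20);
	netif_notify_peers(ctx->net);	/* renamed netdev_notify_peers() in later kernels */
}

/* In probe:   INIT_WORK(&ctx->work, example_send_garp); */
/* On link-up: schedule_work(&ctx->work);                */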
@ -102,22 +102,22 @@ static char kvp_send_buffer[4096];
|
|||
static char kvp_recv_buffer[4096];
|
||||
static struct sockaddr_nl addr;
|
||||
|
||||
static char os_name[100];
|
||||
static char os_major[50];
|
||||
static char os_minor[50];
|
||||
static char processor_arch[50];
|
||||
static char os_build[100];
|
||||
static char *os_name = "";
|
||||
static char *os_major = "";
|
||||
static char *os_minor = "";
|
||||
static char *processor_arch;
|
||||
static char *os_build;
|
||||
static char *lic_version;
|
||||
static struct utsname uts_buf;
|
||||
|
||||
void kvp_get_os_info(void)
|
||||
{
|
||||
FILE *file;
|
||||
char *eol;
|
||||
struct utsname buf;
|
||||
char *p, buf[512];
|
||||
|
||||
uname(&buf);
|
||||
strcpy(os_build, buf.release);
|
||||
strcpy(processor_arch, buf.machine);
|
||||
uname(&uts_buf);
|
||||
os_build = uts_buf.release;
|
||||
processor_arch= uts_buf.machine;
|
||||
|
||||
file = fopen("/etc/SuSE-release", "r");
|
||||
if (file != NULL)
|
||||
|
@ -132,21 +132,46 @@ void kvp_get_os_info(void)
|
|||
/*
|
||||
* We don't have information about the os.
|
||||
*/
|
||||
strcpy(os_name, "Linux");
|
||||
strcpy(os_major, "0");
|
||||
strcpy(os_minor, "0");
|
||||
os_name = uts_buf.sysname;
|
||||
return;
|
||||
|
||||
kvp_osinfo_found:
|
||||
fgets(os_name, 99, file);
|
||||
eol = index(os_name, '\n');
|
||||
*eol = '\0';
|
||||
fgets(os_major, 49, file);
|
||||
eol = index(os_major, '\n');
|
||||
*eol = '\0';
|
||||
fgets(os_minor, 49, file);
|
||||
eol = index(os_minor, '\n');
|
||||
*eol = '\0';
|
||||
/* up to three lines */
|
||||
p = fgets(buf, sizeof(buf), file);
|
||||
if (p) {
|
||||
p = strchr(buf, '\n');
|
||||
if (p)
|
||||
*p = '\0';
|
||||
p = strdup(buf);
|
||||
if (!p)
|
||||
goto done;
|
||||
os_name = p;
|
||||
|
||||
/* second line */
|
||||
p = fgets(buf, sizeof(buf), file);
|
||||
if (p) {
|
||||
p = strchr(buf, '\n');
|
||||
if (p)
|
||||
*p = '\0';
|
||||
p = strdup(buf);
|
||||
if (!p)
|
||||
goto done;
|
||||
os_major = p;
|
||||
|
||||
/* third line */
|
||||
p = fgets(buf, sizeof(buf), file);
|
||||
if (p) {
|
||||
p = strchr(buf, '\n');
|
||||
if (p)
|
||||
*p = '\0';
|
||||
p = strdup(buf);
|
||||
if (p)
|
||||
os_minor = p;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
done:
|
||||
fclose(file);
|
||||
return;
|
||||
}
|
||||
|
@ -293,7 +318,7 @@ netlink_send(int fd, struct cn_msg *msg)
|
|||
return sendmsg(fd, &message, 0);
|
||||
}
|
||||
|
||||
main(void)
|
||||
int main(void)
|
||||
{
|
||||
int fd, len, sock_opt;
|
||||
int error;
|
||||
|
@ -301,9 +326,10 @@ main(void)
|
|||
struct pollfd pfd;
|
||||
struct nlmsghdr *incoming_msg;
|
||||
struct cn_msg *incoming_cn_msg;
|
||||
struct hv_ku_msg *hv_msg;
|
||||
char *p;
|
||||
char *key_value;
|
||||
char *key_name;
|
||||
int key_index;
|
||||
|
||||
daemon(1, 0);
|
||||
openlog("KVP", 0, LOG_USER);
|
||||
|
@ -373,9 +399,10 @@ main(void)
|
|||
* Driver is registering with us; stash away the version
|
||||
* information.
|
||||
*/
|
||||
lic_version = malloc(strlen(incoming_cn_msg->data) + 1);
|
||||
p = (char *)incoming_cn_msg->data;
|
||||
lic_version = malloc(strlen(p) + 1);
|
||||
if (lic_version) {
|
||||
strcpy(lic_version, incoming_cn_msg->data);
|
||||
strcpy(lic_version, p);
|
||||
syslog(LOG_INFO, "KVP LIC Version: %s",
|
||||
lic_version);
|
||||
} else {
|
||||
|
@ -389,14 +416,11 @@ main(void)
|
|||
continue;
|
||||
}
|
||||
|
||||
key_index =
|
||||
((struct hv_ku_msg *)incoming_cn_msg->data)->kvp_index;
|
||||
key_name =
|
||||
((struct hv_ku_msg *)incoming_cn_msg->data)->kvp_key;
|
||||
key_value =
|
||||
((struct hv_ku_msg *)incoming_cn_msg->data)->kvp_value;
|
||||
hv_msg = (struct hv_ku_msg *)incoming_cn_msg->data;
|
||||
key_name = (char *)hv_msg->kvp_key;
|
||||
key_value = (char *)hv_msg->kvp_value;
|
||||
|
||||
switch (key_index) {
|
||||
switch (hv_msg->kvp_index) {
|
||||
case FullyQualifiedDomainName:
|
||||
kvp_get_domain_name(key_value,
|
||||
HV_KVP_EXCHANGE_MAX_VALUE_SIZE);
|
||||
|
|
|
@@ -254,7 +254,7 @@ static int vmbus_on_isr(void)
event = (union hv_synic_event_flags *)page_addr + VMBUS_MESSAGE_SINT;

/* Since we are a child, we only need to check bit 0 */
if (test_and_clear_bit(0, (unsigned long *) &event->flags32[0])) {
if (sync_test_and_clear_bit(0, (unsigned long *) &event->flags32[0])) {
DPRINT_DBG(VMBUS, "received event %d", event->flags32[0]);
ret |= 0x2;
}
@@ -31,6 +31,7 @@
#include "channel_mgmt.h"
#include "ring_buffer.h"
#include <linux/list.h>
#include <asm/sync_bitops.h>


/*
@@ -17,7 +17,8 @@
#ifndef SPI_ADIS16400_H_
#define SPI_ADIS16400_H_

#define ADIS16400_STARTUP_DELAY 220 /* ms */
#define ADIS16400_STARTUP_DELAY 290 /* ms */
#define ADIS16400_MTEST_DELAY 90 /* ms */

#define ADIS16400_READ_REG(a) a
#define ADIS16400_WRITE_REG(a) ((a) | 0x80)
@ -6,6 +6,7 @@
|
|||
*
|
||||
* Copyright (c) 2009 Manuel Stahl <manuel.stahl@iis.fraunhofer.de>
|
||||
* Copyright (c) 2007 Jonathan Cameron <jic23@cam.ac.uk>
|
||||
* Copyright (c) 2011 Analog Devices Inc.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
|
@ -93,7 +94,6 @@ static int adis16400_spi_write_reg_16(struct device *dev,
|
|||
.tx_buf = st->tx + 2,
|
||||
.bits_per_word = 8,
|
||||
.len = 2,
|
||||
.cs_change = 1,
|
||||
},
|
||||
};
|
||||
|
||||
|
@ -137,7 +137,6 @@ static int adis16400_spi_read_reg_16(struct device *dev,
|
|||
.rx_buf = st->rx,
|
||||
.bits_per_word = 8,
|
||||
.len = 2,
|
||||
.cs_change = 1,
|
||||
},
|
||||
};
|
||||
|
||||
|
@ -375,7 +374,7 @@ static int adis16400_self_test(struct device *dev)
|
|||
dev_err(dev, "problem starting self test");
|
||||
goto err_ret;
|
||||
}
|
||||
|
||||
msleep(ADIS16400_MTEST_DELAY);
|
||||
adis16400_check_status(dev);
|
||||
|
||||
err_ret:
|
||||
|
@@ -471,10 +470,11 @@ static int adis16400_initial_setup(struct adis16400_state *st)
if (ret)
goto err_ret;

if (prod_id != ADIS16400_PRODUCT_ID_DEFAULT)
if ((prod_id & 0xF000) != ADIS16400_PRODUCT_ID_DEFAULT)
dev_warn(dev, "unknown product id");

printk(KERN_INFO DRIVER_NAME ": prod_id 0x%04x at CS%d (irq %d)\n",
dev_info(dev, ": prod_id 0x%04x at CS%d (irq %d)\n",
prod_id, st->us->chip_select, st->us->irq);

/* use high spi speed if possible */
@@ -497,12 +497,12 @@ err_ret:
_reg)

static ADIS16400_DEV_ATTR_CALIBBIAS(GYRO_X, ADIS16400_XGYRO_OFF);
static ADIS16400_DEV_ATTR_CALIBBIAS(GYRO_Y, ADIS16400_XGYRO_OFF);
static ADIS16400_DEV_ATTR_CALIBBIAS(GYRO_Z, ADIS16400_XGYRO_OFF);
static ADIS16400_DEV_ATTR_CALIBBIAS(GYRO_Y, ADIS16400_YGYRO_OFF);
static ADIS16400_DEV_ATTR_CALIBBIAS(GYRO_Z, ADIS16400_ZGYRO_OFF);

static ADIS16400_DEV_ATTR_CALIBBIAS(ACCEL_X, ADIS16400_XACCL_OFF);
static ADIS16400_DEV_ATTR_CALIBBIAS(ACCEL_Y, ADIS16400_XACCL_OFF);
static ADIS16400_DEV_ATTR_CALIBBIAS(ACCEL_Z, ADIS16400_XACCL_OFF);
static ADIS16400_DEV_ATTR_CALIBBIAS(ACCEL_Y, ADIS16400_YACCL_OFF);
static ADIS16400_DEV_ATTR_CALIBBIAS(ACCEL_Z, ADIS16400_ZACCL_OFF);


static IIO_DEV_ATTR_IN_NAMED_RAW(0, supply, adis16400_read_14bit_signed,
@ -647,7 +647,7 @@ static int __devinit adis16400_probe(struct spi_device *spi)
|
|||
|
||||
ret = iio_ring_buffer_register(st->indio_dev->ring, 0);
|
||||
if (ret) {
|
||||
printk(KERN_ERR "failed to initialize the ring\n");
|
||||
dev_err(&spi->dev, "failed to initialize the ring\n");
|
||||
goto error_unreg_ring_funcs;
|
||||
}
|
||||
|
||||
|
|
|
@ -122,12 +122,10 @@ static int adis16400_spi_read_burst(struct device *dev, u8 *rx)
|
|||
.tx_buf = st->tx,
|
||||
.bits_per_word = 8,
|
||||
.len = 2,
|
||||
.cs_change = 0,
|
||||
}, {
|
||||
.rx_buf = rx,
|
||||
.bits_per_word = 8,
|
||||
.len = 24,
|
||||
.cs_change = 1,
|
||||
},
|
||||
};
|
||||
|
||||
|
@ -162,9 +160,10 @@ static void adis16400_trigger_bh_to_ring(struct work_struct *work_s)
|
|||
work_trigger_to_ring);
|
||||
struct iio_ring_buffer *ring = st->indio_dev->ring;
|
||||
|
||||
int i = 0;
|
||||
int i = 0, j;
|
||||
s16 *data;
|
||||
size_t datasize = ring->access.get_bytes_per_datum(ring);
|
||||
unsigned long mask = ring->scan_mask;
|
||||
|
||||
data = kmalloc(datasize , GFP_KERNEL);
|
||||
if (data == NULL) {
|
||||
|
@ -174,9 +173,12 @@ static void adis16400_trigger_bh_to_ring(struct work_struct *work_s)
|
|||
|
||||
if (ring->scan_count)
|
||||
if (adis16400_spi_read_burst(&st->indio_dev->dev, st->rx) >= 0)
|
||||
for (; i < ring->scan_count; i++)
|
||||
for (; i < ring->scan_count; i++) {
|
||||
j = __ffs(mask);
|
||||
mask &= ~(1 << j);
|
||||
data[i] = be16_to_cpup(
|
||||
(__be16 *)&(st->rx[i*2]));
|
||||
(__be16 *)&(st->rx[j*2]));
|
||||
}
|
||||
|
||||
/* Guaranteed to be aligned with 8 byte boundary */
|
||||
if (ring->scan_timestamp)
|
||||
|
|
|
@ -1,15 +0,0 @@
|
|||
config MRST_RAR_HANDLER
|
||||
tristate "RAR handler driver for Intel Moorestown platform"
|
||||
depends on RAR_REGISTER
|
||||
---help---
|
||||
This driver provides a memory management interface to
|
||||
restricted access regions (RAR) available on the Intel
|
||||
Moorestown platform.
|
||||
|
||||
Once locked down, restricted access regions are only
|
||||
accessible by specific hardware on the platform. The x86
|
||||
CPU is typically not one of those platforms. As such this
|
||||
driver does not access RAR, and only provides a buffer
|
||||
allocation/bookkeeping mechanism.
|
||||
|
||||
If unsure, say N.
|
|
@ -1,2 +0,0 @@
|
|||
obj-$(CONFIG_MRST_RAR_HANDLER) += memrar.o
|
||||
memrar-y := memrar_allocator.o memrar_handler.o
|
|
@ -1,43 +0,0 @@
|
|||
RAR Handler (memrar) Driver TODO Items
|
||||
======================================
|
||||
|
||||
Maintainer: Eugene Epshteyn <eugene.epshteyn@intel.com>
|
||||
|
||||
memrar.h
|
||||
--------
|
||||
1. This header exposes the driver's user space and kernel space
|
||||
interfaces. It should be moved to <linux/rar/memrar.h>, or
|
||||
something along those lines, when this memrar driver is moved out
|
||||
of `staging'.
|
||||
a. It would be ideal if staging/rar_register/rar_register.h was
|
||||
moved to the same directory.
|
||||
|
||||
memrar_allocator.[ch]
|
||||
---------------------
|
||||
1. Address potential fragmentation issues with the memrar_allocator.
|
||||
|
||||
2. Hide struct memrar_allocator details/fields. They need not be
|
||||
exposed to the user.
|
||||
a. Forward declare struct memrar_allocator.
|
||||
b. Move all three struct definitions to `memrar_allocator.c'
|
||||
source file.
|
||||
c. Add a memrar_allocator_largest_free_area() function, or
|
||||
something like that to get access to the value of the struct
|
||||
memrar_allocator "largest_free_area" field. This allows the
|
||||
struct memrar_allocator fields to be completely hidden from
|
||||
the user. The memrar_handler code really only needs this for
|
||||
statistic gathering on-demand.
|
||||
d. Do the same for the "capacity" field as the
|
||||
"largest_free_area" field.
|
||||
|
||||
3. Move memrar_allocator.* to kernel `lib' directory since it is HW
|
||||
neutral.
|
||||
a. Alternatively, use lib/genalloc.c instead.
|
||||
b. A kernel port of Doug Lea's malloc() implementation may also
|
||||
be an option.
|
||||
|
||||
memrar_handler.c
|
||||
----------------
|
||||
1. Split user space interface (ioctl code) from core/kernel code,
|
||||
e.g.:
|
||||
memrar_handler.c -> memrar_core.c, memrar_user.c
|
|
@ -1,89 +0,0 @@
|
|||
What: /dev/memrar
|
||||
Date: March 2010
|
||||
KernelVersion: 2.6.34
|
||||
Contact: Eugene Epshteyn <eugene.epshteyn@intel.com>
|
||||
Description: The Intel Moorestown Restricted Access Region (RAR)
|
||||
Handler driver exposes an ioctl() based interface that
|
||||
allows a user to reserve and release blocks of RAR
|
||||
memory.
|
||||
|
||||
Note: A sysfs based one was not appropriate for the
|
||||
RAR handler's usage model.
|
||||
|
||||
=========================================================
|
||||
ioctl() Requests
|
||||
=========================================================
|
||||
RAR_HANDLER_RESERVE
|
||||
-------------------
|
||||
Description: Reserve RAR block.
|
||||
Type: struct RAR_block_info
|
||||
Direction: in/out
|
||||
Errors: EINVAL (invalid RAR type or size)
|
||||
ENOMEM (not enough RAR memory)
|
||||
|
||||
RAR_HANDLER_STAT
|
||||
----------------
|
||||
Description: Get RAR statistics.
|
||||
Type: struct RAR_stat
|
||||
Direction: in/out
|
||||
Errors: EINVAL (invalid RAR type)
|
||||
|
||||
RAR_HANDLER_RELEASE
|
||||
-------------------
|
||||
Description: Release previously reserved RAR block.
|
||||
Type: 32 bit unsigned integer
|
||||
(e.g. uint32_t), i.e the RAR "handle".
|
||||
Direction: in
|
||||
Errors: EINVAL (invalid RAR handle)
|
||||
|
||||
|
||||
=========================================================
|
||||
ioctl() Request Parameter Types
|
||||
=========================================================
|
||||
The structures referred to above are defined as
|
||||
follows:
|
||||
|
||||
/**
|
||||
* struct RAR_block_info - user space struct that
|
||||
* describes RAR buffer
|
||||
* @type: Type of RAR memory (e.g.,
|
||||
* RAR_TYPE_VIDEO or RAR_TYPE_AUDIO) [in]
|
||||
* @size: Requested size of a block in bytes to
|
||||
* be reserved in RAR. [in]
|
||||
* @handle: Handle that can be used to refer to
|
||||
* reserved block. [out]
|
||||
*
|
||||
* This is the basic structure exposed to the user
|
||||
* space that describes a given RAR buffer. It used
|
||||
* as the parameter for the RAR_HANDLER_RESERVE ioctl.
|
||||
* The buffer's underlying bus address is not exposed
|
||||
* to the user. User space code refers to the buffer
|
||||
* entirely by "handle".
|
||||
*/
|
||||
struct RAR_block_info {
|
||||
__u32 type;
|
||||
__u32 size;
|
||||
__u32 handle;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct RAR_stat - RAR statistics structure
|
||||
* @type: Type of RAR memory (e.g.,
|
||||
* RAR_TYPE_VIDEO or
|
||||
* RAR_TYPE_AUDIO) [in]
|
||||
* @capacity: Total size of RAR memory
|
||||
* region. [out]
|
||||
* @largest_block_size: Size of the largest reservable
|
||||
* block. [out]
|
||||
*
|
||||
* This structure is used for RAR_HANDLER_STAT ioctl.
|
||||
*/
|
||||
struct RAR_stat {
|
||||
__u32 type;
|
||||
__u32 capacity;
|
||||
__u32 largest_block_size;
|
||||
};
|
||||
|
||||
Lastly, the RAR_HANDLER_RELEASE ioctl expects a
|
||||
"handle" to the RAR block of memory. It is a 32 bit
|
||||
unsigned integer.
|
|
@ -1,174 +0,0 @@
|
|||
/*
|
||||
* RAR Handler (/dev/memrar) internal driver API.
|
||||
* Copyright (C) 2010 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of version 2 of the GNU General
|
||||
* Public License as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be
|
||||
* useful, but WITHOUT ANY WARRANTY; without even the implied
|
||||
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
|
||||
* PURPOSE. See the GNU General Public License for more details.
|
||||
* You should have received a copy of the GNU General Public
|
||||
* License along with this program; if not, write to the Free
|
||||
* Software Foundation, Inc., 59 Temple Place - Suite 330,
|
||||
* Boston, MA 02111-1307, USA.
|
||||
* The full GNU General Public License is included in this
|
||||
* distribution in the file called COPYING.
|
||||
*/
|
||||
|
||||
|
||||
#ifndef _MEMRAR_H
|
||||
#define _MEMRAR_H
|
||||
|
||||
#include <linux/ioctl.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
|
||||
/**
|
||||
* struct RAR_stat - RAR statistics structure
|
||||
* @type: Type of RAR memory (e.g., audio vs. video)
|
||||
* @capacity: Total size of RAR memory region.
|
||||
* @largest_block_size: Size of the largest reservable block.
|
||||
*
|
||||
* This structure is used for RAR_HANDLER_STAT ioctl and for the
|
||||
* RAR_get_stat() user space wrapper function.
|
||||
*/
|
||||
struct RAR_stat {
|
||||
__u32 type;
|
||||
__u32 capacity;
|
||||
__u32 largest_block_size;
|
||||
};
|
||||
|
||||
|
||||
/**
|
||||
* struct RAR_block_info - user space struct that describes RAR buffer
|
||||
* @type: Type of RAR memory (e.g., audio vs. video)
|
||||
* @size: Requested size of a block to be reserved in RAR.
|
||||
* @handle: Handle that can be used to refer to reserved block.
|
||||
*
|
||||
* This is the basic structure exposed to the user space that
|
||||
* describes a given RAR buffer. The buffer's underlying bus address
|
||||
* is not exposed to the user. User space code refers to the buffer
|
||||
* entirely by "handle".
|
||||
*/
|
||||
struct RAR_block_info {
|
||||
__u32 type;
|
||||
__u32 size;
|
||||
__u32 handle;
|
||||
};
|
||||
|
||||
|
||||
#define RAR_IOCTL_BASE 0xE0
|
||||
|
||||
/* Reserve RAR block. */
|
||||
#define RAR_HANDLER_RESERVE _IOWR(RAR_IOCTL_BASE, 0x00, struct RAR_block_info)
|
||||
|
||||
/* Release previously reserved RAR block. */
|
||||
#define RAR_HANDLER_RELEASE _IOW(RAR_IOCTL_BASE, 0x01, __u32)
|
||||
|
||||
/* Get RAR stats. */
|
||||
#define RAR_HANDLER_STAT _IOWR(RAR_IOCTL_BASE, 0x02, struct RAR_stat)
|
||||
|
||||
|
||||
#ifdef __KERNEL__
|
||||
|
||||
/* -------------------------------------------------------------- */
|
||||
/* Kernel Side RAR Handler Interface */
|
||||
/* -------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* struct RAR_buffer - kernel space struct that describes RAR buffer
|
||||
* @info: structure containing base RAR buffer information
|
||||
* @bus_address: buffer bus address
|
||||
*
|
||||
* Structure that contains all information related to a given block of
|
||||
* memory in RAR. It is generally only used when retrieving RAR
|
||||
* related bus addresses.
|
||||
*
|
||||
* Note: This structure is used only by RAR-enabled drivers, and is
|
||||
* not intended to be exposed to the user space.
|
||||
*/
|
||||
struct RAR_buffer {
|
||||
struct RAR_block_info info;
|
||||
dma_addr_t bus_address;
|
||||
};
|
||||
|
||||
#if defined(CONFIG_MRST_RAR_HANDLER)
|
||||
/**
|
||||
* rar_reserve() - reserve RAR buffers
|
||||
* @buffers: array of RAR_buffers where type and size of buffers to
|
||||
* reserve are passed in, handle and bus address are
|
||||
* passed out
|
||||
* @count: number of RAR_buffers in the "buffers" array
|
||||
*
|
||||
* This function will reserve buffers in the restricted access regions
|
||||
* of given types.
|
||||
*
|
||||
* It returns the number of successfully reserved buffers. Successful
|
||||
* buffer reservations will have the corresponding bus_address field
|
||||
* set to a non-zero value in the given buffers vector.
|
||||
*/
|
||||
extern size_t rar_reserve(struct RAR_buffer *buffers,
|
||||
size_t count);
|
||||
|
||||
/**
|
||||
* rar_release() - release RAR buffers
|
||||
* @buffers: array of RAR_buffers where handles to buffers to be
|
||||
* released are passed in
|
||||
* @count: number of RAR_buffers in the "buffers" array
|
||||
*
|
||||
* This function will release RAR buffers that were retrieved through
|
||||
* a call to rar_reserve() or rar_handle_to_bus() by decrementing the
|
||||
* reference count. The RAR buffer will be reclaimed when the
|
||||
* reference count drops to zero.
|
||||
*
|
||||
* It returns the number of successfully released buffers. Successful
|
||||
* releases will have their handle field set to zero in the given
|
||||
* buffers vector.
|
||||
*/
|
||||
extern size_t rar_release(struct RAR_buffer *buffers,
|
||||
size_t count);
|
||||
|
||||
/**
|
||||
* rar_handle_to_bus() - convert a vector of RAR handles to bus addresses
|
||||
* @buffers: array of RAR_buffers containing handles to be
|
||||
* converted to bus_addresses
|
||||
* @count: number of RAR_buffers in the "buffers" array
|
||||
|
||||
* This function will retrieve the RAR buffer bus addresses, type and
|
||||
* size corresponding to the RAR handles provided in the buffers
|
||||
* vector.
|
||||
*
|
||||
* It returns the number of successfully converted buffers. The bus
|
||||
* address will be set to 0 for unrecognized handles.
|
||||
*
|
||||
* The reference count for each corresponding buffer in RAR will be
|
||||
* incremented. Call rar_release() when done with the buffers.
|
||||
*/
|
||||
extern size_t rar_handle_to_bus(struct RAR_buffer *buffers,
|
||||
size_t count);
|
||||
|
||||
#else
|
||||
|
||||
extern inline size_t rar_reserve(struct RAR_buffer *buffers, size_t count)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
extern inline size_t rar_release(struct RAR_buffer *buffers, size_t count)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
extern inline size_t rar_handle_to_bus(struct RAR_buffer *buffers,
|
||||
size_t count)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif /* MRST_RAR_HANDLER */
|
||||
#endif /* __KERNEL__ */
|
||||
|
||||
#endif /* _MEMRAR_H */
|
|
@ -1,432 +0,0 @@
|
|||
/*
|
||||
* memrar_allocator 1.0: An allocator for Intel RAR.
|
||||
*
|
||||
* Copyright (C) 2010 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of version 2 of the GNU General
|
||||
* Public License as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be
|
||||
* useful, but WITHOUT ANY WARRANTY; without even the implied
|
||||
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
|
||||
* PURPOSE. See the GNU General Public License for more details.
|
||||
* You should have received a copy of the GNU General Public
|
||||
* License along with this program; if not, write to the Free
|
||||
* Software Foundation, Inc., 59 Temple Place - Suite 330,
|
||||
* Boston, MA 02111-1307, USA.
|
||||
* The full GNU General Public License is included in this
|
||||
* distribution in the file called COPYING.
|
||||
*
|
||||
*
|
||||
* ------------------------------------------------------------------
|
||||
*
|
||||
* This simple allocator implementation provides a
|
||||
* malloc()/free()-like interface for reserving space within a
|
||||
* previously reserved block of memory. It is not specific to
|
||||
* any hardware, nor is it coupled with the lower level paging
|
||||
* mechanism.
|
||||
*
|
||||
* The primary goal of this implementation is to provide a means
|
||||
* to partition an arbitrary block of memory without actually
|
||||
* accessing the memory or incurring any hardware side-effects
|
||||
* (e.g. paging). It is, in effect, a bookkeeping mechanism for
|
||||
* buffers.
|
||||
*/
|
||||
|
||||
|
||||
#include "memrar_allocator.h"
|
||||
#include <linux/slab.h>
|
||||
#include <linux/bug.h>
|
||||
#include <linux/kernel.h>
|
||||
|
||||
|
||||
struct memrar_allocator *memrar_create_allocator(unsigned long base,
|
||||
size_t capacity,
|
||||
size_t block_size)
|
||||
{
|
||||
struct memrar_allocator *allocator = NULL;
|
||||
struct memrar_address_ranges *first_node = NULL;
|
||||
|
||||
/*
|
||||
* Make sure the base address is aligned on a block_size
|
||||
* boundary.
|
||||
*
|
||||
* @todo Is this necessary?
|
||||
*/
|
||||
/* base = ALIGN(base, block_size); */
|
||||
|
||||
/* Validate parameters.
|
||||
*
|
||||
* Make sure we can allocate the entire memory space. Zero
|
||||
* capacity or block size are obviously invalid.
|
||||
*/
|
||||
if (base == 0
|
||||
|| capacity == 0
|
||||
|| block_size == 0
|
||||
|| ULONG_MAX - capacity < base
|
||||
|| capacity < block_size)
|
||||
return allocator;
|
||||
|
||||
/*
|
||||
* There isn't much point in creating a memory allocator that
|
||||
* is only capable of holding one block but we'll allow it,
|
||||
* and issue a diagnostic.
|
||||
*/
|
||||
WARN(capacity < block_size * 2,
|
||||
"memrar: Only one block available to allocator.\n");
|
||||
|
||||
allocator = kmalloc(sizeof(*allocator), GFP_KERNEL);
|
||||
|
||||
if (allocator == NULL)
|
||||
return allocator;
|
||||
|
||||
mutex_init(&allocator->lock);
|
||||
allocator->base = base;
|
||||
|
||||
/* Round the capacity down to a multiple of block_size. */
|
||||
allocator->capacity = (capacity / block_size) * block_size;
|
||||
|
||||
allocator->block_size = block_size;
|
||||
|
||||
allocator->largest_free_area = allocator->capacity;
|
||||
|
||||
/* Initialize the handle and free lists. */
|
||||
INIT_LIST_HEAD(&allocator->allocated_list.list);
|
||||
INIT_LIST_HEAD(&allocator->free_list.list);
|
||||
|
||||
first_node = kmalloc(sizeof(*first_node), GFP_KERNEL);
|
||||
if (first_node == NULL) {
|
||||
kfree(allocator);
|
||||
allocator = NULL;
|
||||
} else {
|
||||
/* Full range of blocks is available. */
|
||||
first_node->range.begin = base;
|
||||
first_node->range.end = base + allocator->capacity;
|
||||
list_add(&first_node->list,
|
||||
&allocator->free_list.list);
|
||||
}
|
||||
|
||||
return allocator;
|
||||
}
|
||||
|
||||
void memrar_destroy_allocator(struct memrar_allocator *allocator)
|
||||
{
|
||||
/*
|
||||
* Assume that the memory allocator lock isn't held at this
|
||||
* point in time. Caller must ensure that.
|
||||
*/
|
||||
|
||||
struct memrar_address_ranges *pos = NULL;
|
||||
struct memrar_address_ranges *n = NULL;
|
||||
|
||||
if (allocator == NULL)
|
||||
return;
|
||||
|
||||
mutex_lock(&allocator->lock);
|
||||
|
||||
/* Reclaim free list resources. */
|
||||
list_for_each_entry_safe(pos,
|
||||
n,
|
||||
&allocator->free_list.list,
|
||||
list) {
|
||||
list_del(&pos->list);
|
||||
kfree(pos);
|
||||
}
|
||||
|
||||
mutex_unlock(&allocator->lock);
|
||||
|
||||
kfree(allocator);
|
||||
}
|
||||
|
||||
unsigned long memrar_allocator_alloc(struct memrar_allocator *allocator,
|
||||
size_t size)
|
||||
{
|
||||
struct memrar_address_ranges *pos = NULL;
|
||||
|
||||
size_t num_blocks;
|
||||
unsigned long reserved_bytes;
|
||||
|
||||
/*
|
||||
* Address of allocated buffer. We assume that zero is not a
|
||||
* valid address.
|
||||
*/
|
||||
unsigned long addr = 0;
|
||||
|
||||
if (allocator == NULL || size == 0)
|
||||
return addr;
|
||||
|
||||
/* Reserve enough blocks to hold the amount of bytes requested. */
|
||||
num_blocks = DIV_ROUND_UP(size, allocator->block_size);
|
||||
|
||||
reserved_bytes = num_blocks * allocator->block_size;
|
||||
|
||||
mutex_lock(&allocator->lock);
|
||||
|
||||
if (reserved_bytes > allocator->largest_free_area) {
|
||||
mutex_unlock(&allocator->lock);
|
||||
return addr;
|
||||
}
|
||||
|
||||
/*
|
||||
* Iterate through the free list to find a suitably sized
|
||||
* range of free contiguous memory blocks.
|
||||
*
|
||||
* We also take the opportunity to reset the size of the
|
||||
* largest free area size statistic.
|
||||
*/
|
||||
list_for_each_entry(pos, &allocator->free_list.list, list) {
|
||||
struct memrar_address_range * const fr = &pos->range;
|
||||
size_t const curr_size = fr->end - fr->begin;
|
||||
|
||||
if (curr_size >= reserved_bytes && addr == 0) {
|
||||
struct memrar_address_range *range = NULL;
|
||||
struct memrar_address_ranges * const new_node =
|
||||
kmalloc(sizeof(*new_node), GFP_KERNEL);
|
||||
|
||||
if (new_node == NULL)
|
||||
break;
|
||||
|
||||
list_add(&new_node->list,
|
||||
&allocator->allocated_list.list);
|
||||
|
||||
/*
|
||||
* Carve out area of memory from end of free
|
||||
* range.
|
||||
*/
|
||||
range = &new_node->range;
|
||||
range->end = fr->end;
|
||||
fr->end -= reserved_bytes;
|
||||
range->begin = fr->end;
|
||||
addr = range->begin;
|
||||
|
||||
/*
|
||||
* Check if largest area has decreased in
|
||||
* size. We'll need to continue scanning for
|
||||
* the next largest area if it has.
|
||||
*/
|
||||
if (curr_size == allocator->largest_free_area)
|
||||
allocator->largest_free_area -=
|
||||
reserved_bytes;
|
||||
else
|
||||
break;
|
||||
}
|
||||
|
||||
/*
|
||||
* Reset largest free area size statistic as needed,
|
||||
* but only if we've actually allocated memory.
|
||||
*/
|
||||
if (addr != 0
|
||||
&& curr_size > allocator->largest_free_area) {
|
||||
allocator->largest_free_area = curr_size;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
mutex_unlock(&allocator->lock);
|
||||
|
||||
return addr;
|
||||
}
|
||||
|
||||
long memrar_allocator_free(struct memrar_allocator *allocator,
|
||||
unsigned long addr)
|
||||
{
|
||||
struct list_head *pos = NULL;
|
||||
struct list_head *tmp = NULL;
|
||||
struct list_head *dst = NULL;
|
||||
|
||||
struct memrar_address_ranges *allocated = NULL;
|
||||
struct memrar_address_range const *handle = NULL;
|
||||
|
||||
unsigned long old_end = 0;
|
||||
unsigned long new_chunk_size = 0;
|
||||
|
||||
if (allocator == NULL)
|
||||
return -EINVAL;
|
||||
|
||||
if (addr == 0)
|
||||
return 0; /* Ignore "free(0)". */
|
||||
|
||||
mutex_lock(&allocator->lock);
|
||||
|
||||
/* Find the corresponding handle. */
|
||||
list_for_each_entry(allocated,
|
||||
&allocator->allocated_list.list,
|
||||
list) {
|
||||
if (allocated->range.begin == addr) {
|
||||
handle = &allocated->range;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/* No such buffer created by this allocator. */
|
||||
if (handle == NULL) {
|
||||
mutex_unlock(&allocator->lock);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
/*
|
||||
* Coalesce adjacent chunks of memory if possible.
|
||||
*
|
||||
* @note This isn't full blown coalescing since we're only
|
||||
* coalescing at most three chunks of memory.
|
||||
*/
|
||||
list_for_each_safe(pos, tmp, &allocator->free_list.list) {
|
||||
/* @todo O(n) performance. Optimize. */
|
||||
|
||||
struct memrar_address_range * const chunk =
|
||||
&list_entry(pos,
|
||||
struct memrar_address_ranges,
|
||||
list)->range;
|
||||
|
||||
/* Extend size of existing free adjacent chunk. */
|
||||
if (chunk->end == handle->begin) {
|
||||
/*
|
||||
* Chunk "less than" than the one we're
|
||||
* freeing is adjacent.
|
||||
*
|
||||
* Before:
|
||||
*
|
||||
* +-----+------+
|
||||
* |chunk|handle|
|
||||
* +-----+------+
|
||||
*
|
||||
* After:
|
||||
*
|
||||
* +------------+
|
||||
* | chunk |
|
||||
* +------------+
|
||||
*/
|
||||
|
||||
struct memrar_address_ranges const * const next =
|
||||
list_entry(pos->next,
|
||||
struct memrar_address_ranges,
|
||||
list);
|
||||
|
||||
chunk->end = handle->end;
|
||||
|
||||
/*
|
||||
* Now check if next free chunk is adjacent to
|
||||
* the current extended free chunk.
|
||||
*
|
||||
* Before:
|
||||
*
|
||||
* +------------+----+
|
||||
* | chunk |next|
|
||||
* +------------+----+
|
||||
*
|
||||
* After:
|
||||
*
|
||||
* +-----------------+
|
||||
* | chunk |
|
||||
* +-----------------+
|
||||
*/
|
||||
if (!list_is_singular(pos)
|
||||
&& chunk->end == next->range.begin) {
|
||||
chunk->end = next->range.end;
|
||||
list_del(pos->next);
|
||||
kfree(next);
|
||||
}
|
||||
|
||||
list_del(&allocated->list);
|
||||
|
||||
new_chunk_size = chunk->end - chunk->begin;
|
||||
|
||||
goto exit_memrar_free;
|
||||
|
||||
} else if (handle->end == chunk->begin) {
|
||||
/*
|
||||
* Chunk "greater than" than the one we're
|
||||
* freeing is adjacent.
|
||||
*
|
||||
* +------+-----+
|
||||
* |handle|chunk|
|
||||
* +------+-----+
|
||||
*
|
||||
* After:
|
||||
*
|
||||
* +------------+
|
||||
* | chunk |
|
||||
* +------------+
|
||||
*/
|
||||
|
||||
struct memrar_address_ranges const * const prev =
|
||||
list_entry(pos->prev,
|
||||
struct memrar_address_ranges,
|
||||
list);
|
||||
|
||||
chunk->begin = handle->begin;
|
||||
|
||||
/*
|
||||
* Now check if previous free chunk is
|
||||
* adjacent to the current extended free
|
||||
* chunk.
|
||||
*
|
||||
*
|
||||
* Before:
|
||||
*
|
||||
* +----+------------+
|
||||
* |prev| chunk |
|
||||
* +----+------------+
|
||||
*
|
||||
* After:
|
||||
*
|
||||
* +-----------------+
|
||||
* | chunk |
|
||||
* +-----------------+
|
||||
*/
|
||||
if (!list_is_singular(pos)
|
||||
&& prev->range.end == chunk->begin) {
|
||||
chunk->begin = prev->range.begin;
|
||||
list_del(pos->prev);
|
||||
kfree(prev);
|
||||
}
|
||||
|
||||
list_del(&allocated->list);
|
||||
|
||||
new_chunk_size = chunk->end - chunk->begin;
|
||||
|
||||
goto exit_memrar_free;
|
||||
|
||||
} else if (chunk->end < handle->begin
|
||||
&& chunk->end > old_end) {
|
||||
/* Keep track of where the entry could be
|
||||
* potentially moved from the "allocated" list
|
||||
* to the "free" list if coalescing doesn't
|
||||
* occur, making sure the "free" list remains
|
||||
* sorted.
|
||||
*/
|
||||
old_end = chunk->end;
|
||||
dst = pos;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Nothing to coalesce.
|
||||
*
|
||||
* Move the entry from the "allocated" list to the "free"
|
||||
* list.
|
||||
*/
|
||||
list_move(&allocated->list, dst);
|
||||
new_chunk_size = handle->end - handle->begin;
|
||||
allocated = NULL;
|
||||
|
||||
exit_memrar_free:
|
||||
|
||||
if (new_chunk_size > allocator->largest_free_area)
|
||||
allocator->largest_free_area = new_chunk_size;
|
||||
|
||||
mutex_unlock(&allocator->lock);
|
||||
|
||||
kfree(allocated);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
|
||||
/*
|
||||
Local Variables:
|
||||
c-file-style: "linux"
|
||||
End:
|
||||
*/
|
|
@ -1,149 +0,0 @@
|
|||
/*
|
||||
* Copyright (C) 2010 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of version 2 of the GNU General
|
||||
* Public License as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be
|
||||
* useful, but WITHOUT ANY WARRANTY; without even the implied
|
||||
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
|
||||
* PURPOSE. See the GNU General Public License for more details.
|
||||
* You should have received a copy of the GNU General Public
|
||||
* License along with this program; if not, write to the Free
|
||||
* Software Foundation, Inc., 59 Temple Place - Suite 330,
|
||||
* Boston, MA 02111-1307, USA.
|
||||
* The full GNU General Public License is included in this
|
||||
* distribution in the file called COPYING.
|
||||
*/
|
||||
|
||||
#ifndef MEMRAR_ALLOCATOR_H
|
||||
#define MEMRAR_ALLOCATOR_H
|
||||
|
||||
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/kernel.h>
|
||||
|
||||
|
||||
/**
|
||||
* struct memrar_address_range - struct that describes a memory range
|
||||
* @begin: Beginning of available address range.
|
||||
* @end: End of available address range, one past the end,
|
||||
* i.e. [begin, end).
|
||||
*/
|
||||
struct memrar_address_range {
|
||||
/* private: internal use only */
|
||||
unsigned long begin;
|
||||
unsigned long end;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct memrar_address_ranges - list of areas of memory.
|
||||
* @list: Linked list of address ranges.
|
||||
* @range: Memory address range corresponding to given list node.
|
||||
*/
|
||||
struct memrar_address_ranges {
|
||||
/* private: internal use only */
|
||||
struct list_head list;
|
||||
struct memrar_address_range range;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct memrar_allocator - encapsulation of the memory allocator state
|
||||
* @lock: Lock used to synchronize access to the memory
|
||||
* allocator state.
|
||||
* @base: Base (start) address of the allocator memory
|
||||
* space.
|
||||
* @capacity: Size of the allocator memory space in bytes.
|
||||
* @block_size: The size in bytes of individual blocks within
|
||||
* the allocator memory space.
|
||||
* @largest_free_area: Largest free area of memory in the allocator
|
||||
* in bytes.
|
||||
* @allocated_list: List of allocated memory block address
|
||||
* ranges.
|
||||
* @free_list: List of free address ranges.
|
||||
*
|
||||
* This structure contains all memory allocator state, including the
|
||||
* base address, capacity, free list, lock, etc.
|
||||
*/
|
||||
struct memrar_allocator {
|
||||
/* private: internal use only */
|
||||
struct mutex lock;
|
||||
unsigned long base;
|
||||
size_t capacity;
|
||||
size_t block_size;
|
||||
size_t largest_free_area;
|
||||
struct memrar_address_ranges allocated_list;
|
||||
struct memrar_address_ranges free_list;
|
||||
};
|
||||
|
||||
/**
|
||||
* memrar_create_allocator() - create a memory allocator
|
||||
* @base: Address at which the memory allocator begins.
|
||||
* @capacity: Desired size of the memory allocator. This value must
|
||||
* be larger than the block_size, ideally more than twice
|
||||
* as large since there wouldn't be much point in using a
|
||||
* memory allocator otherwise.
|
||||
* @block_size: The size of individual blocks within the memory
|
||||
* allocator. This value must smaller than the
|
||||
* capacity.
|
||||
*
|
||||
* Create a memory allocator with the given capacity and block size.
|
||||
* The capacity will be reduced to be a multiple of the block size, if
|
||||
* necessary.
|
||||
*
|
||||
* Returns an instance of the memory allocator, if creation succeeds,
|
||||
* otherwise zero if creation fails. Failure may occur if not enough
|
||||
* kernel memory exists to create the memrar_allocator instance
|
||||
* itself, or if the capacity and block_size arguments are not
|
||||
* compatible or make sense.
|
||||
*/
|
||||
struct memrar_allocator *memrar_create_allocator(unsigned long base,
|
||||
size_t capacity,
|
||||
size_t block_size);
|
||||
|
||||
/**
|
||||
* memrar_destroy_allocator() - destroy allocator
|
||||
* @allocator: The allocator being destroyed.
|
||||
*
|
||||
* Reclaim resources held by the memory allocator. The caller must
|
||||
* explicitly free all memory reserved by memrar_allocator_alloc()
|
||||
* prior to calling this function. Otherwise leaks will occur.
|
||||
*/
|
||||
void memrar_destroy_allocator(struct memrar_allocator *allocator);
|
||||
|
||||
/**
|
||||
* memrar_allocator_alloc() - reserve an area of memory of given size
|
||||
* @allocator: The allocator instance being used to reserve buffer.
|
||||
* @size: The size in bytes of the buffer to allocate.
|
||||
*
|
||||
* This functions reserves an area of memory managed by the given
|
||||
* allocator. It returns zero if allocation was not possible.
|
||||
* Failure may occur if the allocator no longer has space available.
|
||||
*/
|
||||
unsigned long memrar_allocator_alloc(struct memrar_allocator *allocator,
|
||||
size_t size);
|
||||
|
||||
/**
|
||||
* memrar_allocator_free() - release buffer starting at given address
|
||||
* @allocator: The allocator instance being used to release the buffer.
|
||||
* @address: The address of the buffer being released.
|
||||
*
|
||||
* Release an area of memory starting at the given address. Failure
|
||||
* could occur if the given address is not in the address space
|
||||
* managed by the allocator. Returns zero on success or an errno
|
||||
* (negative value) on failure.
|
||||
*/
|
||||
long memrar_allocator_free(struct memrar_allocator *allocator,
|
||||
unsigned long address);
|
||||
|
||||
#endif /* MEMRAR_ALLOCATOR_H */
|
||||
|
||||
|
||||
/*
|
||||
Local Variables:
|
||||
c-file-style: "linux"
|
||||
End:
|
||||
*/
|
File diff suppressed because it is too large
|
@@ -9,7 +9,7 @@ config FB_OLPC_DCON

config FB_OLPC_DCON_1
bool "OLPC XO-1 DCON support"
depends on FB_OLPC_DCON
depends on FB_OLPC_DCON && GPIO_CS5535
default y
---help---
Enable support for the DCON in XO-1 model laptops. The kernel
@@ -7,6 +7,7 @@
*/

#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/gpio.h>
#include <asm/olpc.h>
@@ -1,6 +1,7 @@
config R8187SE
tristate "RealTek RTL8187SE Wireless LAN NIC driver"
depends on PCI && WLAN
depends on m
select WIRELESS_EXT
select WEXT_PRIV
select EEPROM_93CX6
@@ -1,6 +1,7 @@
config RTL8192E
tristate "RealTek RTL8192E Wireless LAN NIC driver"
depends on PCI && WLAN
depends on m
select WIRELESS_EXT
select WEXT_PRIV
select CRYPTO
@@ -1,6 +1,7 @@
config RTL8192U
tristate "RealTek RTL8192U Wireless LAN NIC driver"
depends on PCI && WLAN && USB
depends on m
select WIRELESS_EXT
select WEXT_PRIV
select CRYPTO
@ -824,13 +824,13 @@ static void rtsx_init_options(struct rtsx_chip *chip)
|
|||
chip->fpga_ms_hg_clk = CLK_80;
|
||||
chip->fpga_ms_4bit_clk = CLK_80;
|
||||
chip->fpga_ms_1bit_clk = CLK_40;
|
||||
chip->asic_sd_sdr104_clk = 207;
|
||||
chip->asic_sd_sdr50_clk = 99;
|
||||
chip->asic_sd_ddr50_clk = 99;
|
||||
chip->asic_sd_hs_clk = 99;
|
||||
chip->asic_mmc_52m_clk = 99;
|
||||
chip->asic_ms_hg_clk = 119;
|
||||
chip->asic_ms_4bit_clk = 79;
|
||||
chip->asic_sd_sdr104_clk = 203;
|
||||
chip->asic_sd_sdr50_clk = 98;
|
||||
chip->asic_sd_ddr50_clk = 98;
|
||||
chip->asic_sd_hs_clk = 98;
|
||||
chip->asic_mmc_52m_clk = 98;
|
||||
chip->asic_ms_hg_clk = 117;
|
||||
chip->asic_ms_4bit_clk = 78;
|
||||
chip->asic_ms_1bit_clk = 39;
|
||||
chip->ssc_depth_sd_sdr104 = SSC_DEPTH_2M;
|
||||
chip->ssc_depth_sd_sdr50 = SSC_DEPTH_2M;
|
||||
|
|
|
@ -684,6 +684,11 @@ static int rts5209_init(struct rtsx_chip *chip)
|
|||
RTSX_DEBUGP("dw in 0x724: 0x%x\n", lval);
|
||||
val = (u8)lval;
|
||||
if (!(val & 0x80)) {
|
||||
if (val & 0x08)
|
||||
chip->lun_mode = DEFAULT_SINGLE;
|
||||
else
|
||||
chip->lun_mode = SD_MS_2LUN;
|
||||
|
||||
if (val & 0x04) {
|
||||
SET_SDIO_EXIST(chip);
|
||||
} else {
|
||||
|
@ -705,12 +710,6 @@ static int rts5209_init(struct rtsx_chip *chip)
|
|||
|
||||
chip->aspm_l0s_l1_en = (val >> 5) & 0x03;
|
||||
|
||||
if (val & 0x08) {
|
||||
chip->lun_mode = DEFAULT_SINGLE;
|
||||
} else {
|
||||
chip->lun_mode = SD_MS_2LUN;
|
||||
}
|
||||
|
||||
val = (u8)(lval >> 8);
|
||||
|
||||
clk = (val >> 5) & 0x07;
|
||||
|
|
|
@@ -55,8 +55,6 @@
 #include <linux/jiffies.h>
 #include <linux/rar_register.h>
 
-#include "../memrar/memrar.h"
-
 #include "sep_driver_hw_defs.h"
 #include "sep_driver_config.h"
 #include "sep_driver_api.h"

@@ -2372,7 +2370,6 @@ static int sep_rar_prepare_output_msg_handler(struct sep_device *sep,
         int error = 0;
         /* Command args */
         struct rar_hndl_to_bus_struct command_args;
-        struct RAR_buffer rar_buf;
         /* Bus address */
         dma_addr_t rar_bus = 0;
         /* Holds the RAR address in the system memory offset */

@@ -2386,16 +2383,8 @@ static int sep_rar_prepare_output_msg_handler(struct sep_device *sep,
         }
 
-        /* Call to translation function only if user handle is not NULL */
-        if (command_args.rar_handle) {
-                memset(&rar_buf, 0, sizeof(rar_buf));
-                rar_buf.info.handle = (u32)command_args.rar_handle;
-
-                if (rar_handle_to_bus(&rar_buf, 1) != 1) {
-                        error = -EFAULT;
-                        goto end_function;
-                }
-                rar_bus = rar_buf.bus_address;
-        }
+        if (command_args.rar_handle)
+                return -EOPNOTSUPP;
+
         dev_dbg(&sep->pdev->dev, "rar msg; rar_addr_bus = %x\n", (u32)rar_bus);
 
         /* Set value in the SYSTEM MEMORY offset */

@@ -26,10 +26,6 @@
  *	Boyod.yang <boyod.yang@siliconmotion.com.cn>
  */
 
-#ifndef __KERNEL__
-#define __KERNEL__
-#endif
-
 #include <linux/io.h>
 #include <linux/fb.h>
 #include <linux/pci.h>

@@ -1019,6 +1015,7 @@ static void __devexit smtcfb_pci_remove(struct pci_dev *pdev)
         smtc_free_fb_info(sfb);
 }
 
+#ifdef CONFIG_PM
 /* Jason (08/14/2009)
  * suspend function, called when the suspend event is triggered
  */

@@ -1111,6 +1108,7 @@ static int __maybe_unused smtcfb_resume(struct pci_dev *pdev)
 
         return 0;
 }
+#endif
 
 /* Jason (08/13/2009)
  * pci_driver struct used to wrap the original driver

@@ -220,8 +220,10 @@ static void stub_shutdown_connection(struct usbip_device *ud)
         }
 
         /* 1. stop threads */
-        kthread_stop(ud->tcp_rx);
-        kthread_stop(ud->tcp_tx);
+        if (ud->tcp_rx && !task_is_dead(ud->tcp_rx))
+                kthread_stop(ud->tcp_rx);
+        if (ud->tcp_tx && !task_is_dead(ud->tcp_tx))
+                kthread_stop(ud->tcp_tx);
 
         /* 2. close the socket */
         /*

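The hunk above makes the usbip shutdown path tolerate helper threads that were never started or have already exited. A minimal sketch of the same defensive-teardown pattern, pulled out as a kernel-style helper (the helper name is illustrative, not part of the driver):

/* Hedged sketch: only stop a kthread that exists and is still alive;
 * calling kthread_stop() on a NULL or already-exited task would oops
 * or block the shutdown path. */
#include <linux/kthread.h>
#include <linux/sched.h>

static void stop_thread_if_alive(struct task_struct *tsk)
{
        if (tsk && !task_is_dead(tsk))
                kthread_stop(tsk);
}
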
@@ -171,33 +171,23 @@ static int tweak_set_configuration_cmd(struct urb *urb)
 
 static int tweak_reset_device_cmd(struct urb *urb)
 {
-        struct usb_ctrlrequest *req;
-        __u16 value;
-        __u16 index;
-        int ret;
-
-        req = (struct usb_ctrlrequest *) urb->setup_packet;
-        value = le16_to_cpu(req->wValue);
-        index = le16_to_cpu(req->wIndex);
-
-        usbip_uinfo("reset_device (port %d) to %s\n", index,
-                    dev_name(&urb->dev->dev));
-
-        /* all interfaces should be owned by usbip driver, so just reset it. */
-        ret = usb_lock_device_for_reset(urb->dev, NULL);
-        if (ret < 0) {
-                dev_err(&urb->dev->dev, "lock for reset\n");
-                return ret;
-        }
-
-        /* try to reset the device */
-        ret = usb_reset_device(urb->dev);
-        if (ret < 0)
-                dev_err(&urb->dev->dev, "device reset\n");
-
-        usb_unlock_device(urb->dev);
-
-        return ret;
+        struct stub_priv *priv = (struct stub_priv *) urb->context;
+        struct stub_device *sdev = priv->sdev;
+
+        usbip_uinfo("reset_device %s\n", dev_name(&urb->dev->dev));
+
+        /*
+         * usb_lock_device_for_reset caused a deadlock: it causes the driver
+         * to unbind. In the shutdown the rx thread is signalled to shut down
+         * but this thread is pending in the usb_lock_device_for_reset.
+         *
+         * Instead queue the reset.
+         *
+         * Unfortunatly an existing usbip connection will be dropped due to
+         * driver unbinding.
+         */
+        usb_queue_reset_device(sdev->interface);
+        return 0;
 }
 
 /*

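The replacement body above spells out the deadlock: the reset is requested from the very thread that the blocking lock-and-unbind sequence would need to stop. A hedged sketch of the resulting "queue instead of block" pattern (the wrapper name and the intf argument are illustrative only, not usbip code):

#include <linux/usb.h>

/* Sketch: request the reset and return immediately; the USB core performs
 * the actual reset from another context, so the calling thread never waits
 * on its own teardown. */
static void request_deferred_reset(struct usb_interface *intf)
{
        usb_queue_reset_device(intf);
}
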
@@ -170,7 +170,6 @@ static int stub_send_ret_submit(struct stub_device *sdev)
         struct stub_priv *priv, *tmp;
 
         struct msghdr msg;
-        struct kvec iov[3];
         size_t txsize;
 
         size_t total_size = 0;

@@ -180,28 +179,73 @@ static int stub_send_ret_submit(struct stub_device *sdev)
                 struct urb *urb = priv->urb;
                 struct usbip_header pdu_header;
                 void *iso_buffer = NULL;
+                struct kvec *iov = NULL;
+                int iovnum = 0;
 
                 txsize = 0;
                 memset(&pdu_header, 0, sizeof(pdu_header));
                 memset(&msg, 0, sizeof(msg));
-                memset(&iov, 0, sizeof(iov));
 
                 usbip_dbg_stub_tx("setup txdata urb %p\n", urb);
+                if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
+                        iovnum = 2 + urb->number_of_packets;
+                else
+                        iovnum = 2;
+
+                iov = kzalloc(iovnum * sizeof(struct kvec), GFP_KERNEL);
+
+                if (!iov) {
+                        usbip_event_add(&sdev->ud, SDEV_EVENT_ERROR_MALLOC);
+                        return -1;
+                }
+
+                iovnum = 0;
 
                 /* 1. setup usbip_header */
                 setup_ret_submit_pdu(&pdu_header, urb);
                 usbip_dbg_stub_tx("setup txdata seqnum: %d urb: %p\n",
                                   pdu_header.base.seqnum, urb);
                 /*usbip_dump_header(pdu_header);*/
                 usbip_header_correct_endian(&pdu_header, 1);
 
-                iov[0].iov_base = &pdu_header;
-                iov[0].iov_len = sizeof(pdu_header);
+                iov[iovnum].iov_base = &pdu_header;
+                iov[iovnum].iov_len = sizeof(pdu_header);
+                iovnum++;
                 txsize += sizeof(pdu_header);
 
                 /* 2. setup transfer buffer */
-                if (usb_pipein(urb->pipe) && urb->actual_length > 0) {
-                        iov[1].iov_base = urb->transfer_buffer;
-                        iov[1].iov_len = urb->actual_length;
+                if (usb_pipein(urb->pipe) &&
+                    usb_pipetype(urb->pipe) != PIPE_ISOCHRONOUS &&
+                    urb->actual_length > 0) {
+                        iov[iovnum].iov_base = urb->transfer_buffer;
+                        iov[iovnum].iov_len = urb->actual_length;
+                        iovnum++;
                         txsize += urb->actual_length;
+                } else if (usb_pipein(urb->pipe) &&
+                           usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
+                        /*
+                         * For isochronous packets: actual length is the sum of
+                         * the actual length of the individual, packets, but as
+                         * the packet offsets are not changed there will be
+                         * padding between the packets. To optimally use the
+                         * bandwidth the padding is not transmitted.
+                         */
+
+                        int i;
+                        for (i = 0; i < urb->number_of_packets; i++) {
+                                iov[iovnum].iov_base = urb->transfer_buffer + urb->iso_frame_desc[i].offset;
+                                iov[iovnum].iov_len = urb->iso_frame_desc[i].actual_length;
+                                iovnum++;
+                                txsize += urb->iso_frame_desc[i].actual_length;
+                        }
+
+                        if (txsize != sizeof(pdu_header) + urb->actual_length) {
+                                dev_err(&sdev->interface->dev,
+                                        "actual length of urb (%d) does not match iso packet sizes (%d)\n",
+                                        urb->actual_length, txsize-sizeof(pdu_header));
+                                kfree(iov);
+                                usbip_event_add(&sdev->ud, SDEV_EVENT_ERROR_TCP);
+                                return -1;
+                        }
                 }
 
                 /* 3. setup iso_packet_descriptor */

@@ -212,32 +256,34 @@ static int stub_send_ret_submit(struct stub_device *sdev)
                         if (!iso_buffer) {
                                 usbip_event_add(&sdev->ud,
                                                 SDEV_EVENT_ERROR_MALLOC);
+                                kfree(iov);
                                 return -1;
                         }
 
-                        iov[2].iov_base = iso_buffer;
-                        iov[2].iov_len = len;
+                        iov[iovnum].iov_base = iso_buffer;
+                        iov[iovnum].iov_len = len;
                         txsize += len;
+                        iovnum++;
                 }
 
-                ret = kernel_sendmsg(sdev->ud.tcp_socket, &msg, iov,
-                                     3, txsize);
+                ret = kernel_sendmsg(sdev->ud.tcp_socket, &msg,
+                                     iov, iovnum, txsize);
                 if (ret != txsize) {
                         dev_err(&sdev->interface->dev,
                                 "sendmsg failed!, retval %d for %zd\n",
                                 ret, txsize);
+                        kfree(iov);
                         kfree(iso_buffer);
                         usbip_event_add(&sdev->ud, SDEV_EVENT_ERROR_TCP);
                         return -1;
                 }
 
+                kfree(iov);
                 kfree(iso_buffer);
                 usbip_dbg_stub_tx("send txdata\n");
 
                 total_size += txsize;
         }
 
         spin_lock_irqsave(&sdev->priv_lock, flags);
 
         list_for_each_entry_safe(priv, tmp, &sdev->priv_free, list) {

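The rewritten transmit loop above allocates one kvec entry per isochronous frame, so kernel_sendmsg() gathers only the used bytes of each frame and the padding between frames never crosses the socket. A user-space analogue of the same scatter-gather idea, with made-up offsets and lengths (illustrative values, not usbip data):

#include <stdio.h>
#include <string.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
        /* frames live at fixed offsets, but only actual[i] bytes of each are valid */
        char buf[64];
        size_t offset[3] = { 0, 16, 32 };
        size_t actual[3] = { 5, 3, 7 };
        struct iovec iov[3];
        int i;

        memset(buf, '.', sizeof(buf));
        memcpy(buf + offset[0], "AAAAA", 5);
        memcpy(buf + offset[1], "BBB", 3);
        memcpy(buf + offset[2], "CCCCCCC", 7);

        for (i = 0; i < 3; i++) {
                iov[i].iov_base = buf + offset[i];
                iov[i].iov_len = actual[i];
        }

        /* only 15 payload bytes are written; the padding stays local */
        writev(STDOUT_FILENO, iov, 3);
        return 0;
}
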
@@ -333,10 +333,11 @@ void usbip_dump_header(struct usbip_header *pdu)
                 usbip_udbg("CMD_UNLINK: seq %u\n", pdu->u.cmd_unlink.seqnum);
                 break;
         case USBIP_RET_SUBMIT:
-                usbip_udbg("RET_SUBMIT: st %d al %u sf %d ec %d\n",
+                usbip_udbg("RET_SUBMIT: st %d al %u sf %d #p %d ec %d\n",
                            pdu->u.ret_submit.status,
                            pdu->u.ret_submit.actual_length,
                            pdu->u.ret_submit.start_frame,
+                           pdu->u.ret_submit.number_of_packets,
                            pdu->u.ret_submit.error_count);
         case USBIP_RET_UNLINK:
                 usbip_udbg("RET_UNLINK: status %d\n", pdu->u.ret_unlink.status);

@@ -520,6 +521,7 @@ static void usbip_pack_ret_submit(struct usbip_header *pdu, struct urb *urb,
                 rpdu->status = urb->status;
                 rpdu->actual_length = urb->actual_length;
                 rpdu->start_frame = urb->start_frame;
+                rpdu->number_of_packets = urb->number_of_packets;
                 rpdu->error_count = urb->error_count;
         } else {
                 /* vhci_rx.c */

@@ -527,6 +529,7 @@ static void usbip_pack_ret_submit(struct usbip_header *pdu, struct urb *urb,
                 urb->status = rpdu->status;
                 urb->actual_length = rpdu->actual_length;
                 urb->start_frame = rpdu->start_frame;
+                urb->number_of_packets = rpdu->number_of_packets;
                 urb->error_count = rpdu->error_count;
         }
 }

@@ -595,11 +598,13 @@ static void correct_endian_ret_submit(struct usbip_header_ret_submit *pdu,
                 cpu_to_be32s(&pdu->status);
                 cpu_to_be32s(&pdu->actual_length);
                 cpu_to_be32s(&pdu->start_frame);
+                cpu_to_be32s(&pdu->number_of_packets);
                 cpu_to_be32s(&pdu->error_count);
         } else {
                 be32_to_cpus(&pdu->status);
                 be32_to_cpus(&pdu->actual_length);
                 be32_to_cpus(&pdu->start_frame);
+                cpu_to_be32s(&pdu->number_of_packets);
                 be32_to_cpus(&pdu->error_count);
         }
 }

@@ -725,6 +730,7 @@ int usbip_recv_iso(struct usbip_device *ud, struct urb *urb)
         int size = np * sizeof(*iso);
         int i;
         int ret;
+        int total_length = 0;
 
         if (!usb_pipeisoc(urb->pipe))
                 return 0;

@@ -754,19 +760,75 @@ int usbip_recv_iso(struct usbip_device *ud, struct urb *urb)
                 return -EPIPE;
         }
 
         for (i = 0; i < np; i++) {
                 iso = buff + (i * sizeof(*iso));
 
                 usbip_iso_pakcet_correct_endian(iso, 0);
                 usbip_pack_iso(iso, &urb->iso_frame_desc[i], 0);
+                total_length += urb->iso_frame_desc[i].actual_length;
         }
 
         kfree(buff);
 
+        if (total_length != urb->actual_length) {
+                dev_err(&urb->dev->dev,
+                        "total length of iso packets (%d) not equal to actual length of buffer (%d)\n",
+                        total_length, urb->actual_length);
+
+                if (ud->side == USBIP_STUB)
+                        usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
+                else
+                        usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
+
+                return -EPIPE;
+        }
+
         return ret;
 }
 EXPORT_SYMBOL_GPL(usbip_recv_iso);
 
+/*
+ * This functions restores the padding which was removed for optimizing
+ * the bandwidth during transfer over tcp/ip
+ *
+ * buffer and iso packets need to be stored and be in propeper endian in urb
+ * before calling this function
+ */
+int usbip_pad_iso(struct usbip_device *ud, struct urb *urb)
+{
+        int np = urb->number_of_packets;
+        int i;
+        int ret;
+        int actualoffset = urb->actual_length;
+
+        if (!usb_pipeisoc(urb->pipe))
+                return 0;
+
+        /* if no packets or length of data is 0, then nothing to unpack */
+        if (np == 0 || urb->actual_length == 0)
+                return 0;
+
+        /*
+         * if actual_length is transfer_buffer_length then no padding is
+         * present.
+         */
+        if (urb->actual_length == urb->transfer_buffer_length)
+                return 0;
+
+        /*
+         * loop over all packets from last to first (to prevent overwritting
+         * memory when padding) and move them into the proper place
+         */
+        for (i = np-1; i > 0; i--) {
+                actualoffset -= urb->iso_frame_desc[i].actual_length;
+                memmove(urb->transfer_buffer + urb->iso_frame_desc[i].offset,
+                        urb->transfer_buffer + actualoffset,
+                        urb->iso_frame_desc[i].actual_length);
+        }
+        return ret;
+}
+EXPORT_SYMBOL_GPL(usbip_pad_iso);
+
 /* some members of urb must be substituted before. */
 int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb)

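usbip_pad_iso() above re-expands the compacted buffer on the receive side, walking the frames from last to first so each memmove() copies data before anything can overwrite it. A stand-alone sketch of that re-padding step with the same toy layout as the earlier example (offsets and lengths are illustrative, not driver data):

#include <stdio.h>
#include <string.h>

int main(void)
{
        char buf[64] = "AAAAABBBCCCCCCC";  /* compacted stream: 15 bytes */
        size_t offset[3] = { 0, 16, 32 };  /* where each frame belongs */
        size_t actual[3] = { 5, 3, 7 };
        size_t actualoffset = 5 + 3 + 7;   /* total of the actual lengths */
        int i;

        /* frame 0 is already at its final offset, so stop before i == 0 */
        for (i = 2; i > 0; i--) {
                actualoffset -= actual[i];
                memmove(buf + offset[i], buf + actualoffset, actual[i]);
        }

        printf("%.5s | %.3s | %.7s\n",
               buf + offset[0], buf + offset[1], buf + offset[2]);
        return 0;
}
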
@@ -379,6 +379,8 @@ void usbip_header_correct_endian(struct usbip_header *pdu, int send);
 int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb);
 /* some members of urb must be substituted before. */
 int usbip_recv_iso(struct usbip_device *ud, struct urb *urb);
+/* some members of urb must be substituted before. */
+int usbip_pad_iso(struct usbip_device *ud, struct urb *urb);
 void *usbip_alloc_iso_desc_pdu(struct urb *urb, ssize_t *bufflen);
 

@@ -100,6 +100,9 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
                 if (usbip_recv_iso(ud, urb) < 0)
                         return;
 
+                /* restore the padding in iso packets */
+                if (usbip_pad_iso(ud, urb) < 0)
+                        return;
 
                 if (usbip_dbg_flag_vhci_rx)
                         usbip_dump_urb(urb);

@@ -1,6 +1,6 @@
 config VT6655
         tristate "VIA Technologies VT6655 support"
-        depends on PCI && WLAN
+        depends on PCI && WLAN && m
         select WIRELESS_EXT
         select WEXT_PRIV
         ---help---

@@ -1,6 +1,6 @@
 config VT6656
         tristate "VIA Technologies VT6656 support"
-        depends on USB && WLAN
+        depends on USB && WLAN && m
         select WIRELESS_EXT
         select WEXT_PRIV
         select FW_LOADER

@@ -587,6 +587,7 @@ static int cyasgadget_enable(
                         "cy_as_usb_end_point_config EP %s mismatch "
                         "on enabled\n", an_ep->usb_ep_inst.name);
 #endif
+                spin_unlock_irqrestore(&an_dev->lock, flags);
                 return -EINVAL;
         }
 