Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6: (56 commits)
  ieee1394: remove garbage from Kconfig
  ieee1394: more help in Kconfig
  ieee1394: ohci1394: Fix mistake in printk message.
  ieee1394: ohci1394: remove unnecessary rcvPhyPkt bit flipping in LinkControl register
  ieee1394: ohci1394: fix cosmetic problem in error logging
  ieee1394: eth1394: send async streams at S100 on 1394b buses
  ieee1394: eth1394: fix error path in module_init
  ieee1394: eth1394: correct return codes in hard_start_xmit
  ieee1394: eth1394: hard_start_xmit is called in atomic context
  ieee1394: eth1394: some conditions are unlikely
  ieee1394: eth1394: clean up fragment_overlap
  ieee1394: eth1394: don't use alloc_etherdev
  ieee1394: eth1394: omit useless set_mac_address callback
  ieee1394: eth1394: CONFIG_INET is always defined
  ieee1394: eth1394: allow MTU bigger than 1500
  ieee1394: unexport highlevel_host_reset
  ieee1394: eth1394: contain host reset
  ieee1394: eth1394: shorter error messages
  ieee1394: eth1394: correct a memset argument
  ieee1394: eth1394: refactor .probe and .update
  ...
commit 40caf5ea5a
@@ -1,11 +1,8 @@
# -*- shell-script -*-

menu "IEEE 1394 (FireWire) support"

config IEEE1394
        tristate "IEEE 1394 (FireWire) support"
        depends on PCI || BROKEN
        select NET
        help
          IEEE 1394 describes a high performance serial bus, which is also
          known as FireWire(tm) or i.Link(tm) and is used for connecting all

@@ -35,24 +32,7 @@ config IEEE1394_VERBOSEDEBUG
          Say Y if you really want or need the debugging output, everyone
          else says N.

config IEEE1394_EXTRA_CONFIG_ROMS
        bool "Build in extra config rom entries for certain functionality"
        depends on IEEE1394
        help
          Some IEEE1394 functionality depends on extra config rom entries
          being available in the host adapters CSR. These options will
          allow you to choose which ones.

config IEEE1394_CONFIG_ROM_IP1394
        bool "IP-1394 Entry"
        depends on IEEE1394_EXTRA_CONFIG_ROMS && IEEE1394
        help
          Adds an entry for using IP-over-1394. If you want to use your
          IEEE1394 bus as a network for IP systems (including interacting
          with MacOSX and WinXP IP-over-1394), enable this option and the
          eth1394 option below.

comment "Device Drivers"
comment "Controllers"
        depends on IEEE1394

comment "Texas Instruments PCILynx requires I2C"

@@ -70,6 +50,10 @@ config IEEE1394_PCILYNX
          To compile this driver as a module, say M here: the
          module will be called pcilynx.

          Only some old and now very rare PCI and CardBus cards and
          PowerMacs G3 B&W contain the PCILynx controller. Therefore
          almost everybody can say N here.

config IEEE1394_OHCI1394
        tristate "OHCI-1394 support"
        depends on PCI && IEEE1394

@@ -83,7 +67,7 @@ config IEEE1394_OHCI1394
          To compile this driver as a module, say M here: the
          module will be called ohci1394.

comment "Protocol Drivers"
comment "Protocols"
        depends on IEEE1394

config IEEE1394_VIDEO1394

@@ -121,11 +105,15 @@ config IEEE1394_SBP2_PHYS_DMA
          This option is buggy and currently broken on some architectures.
          If unsure, say N.

config IEEE1394_ETH1394_ROM_ENTRY
        depends on IEEE1394
        bool
        default n

config IEEE1394_ETH1394
        tristate "Ethernet over 1394"
        tristate "IP over 1394"
        depends on IEEE1394 && EXPERIMENTAL && INET
        select IEEE1394_CONFIG_ROM_IP1394
        select IEEE1394_EXTRA_CONFIG_ROMS
        select IEEE1394_ETH1394_ROM_ENTRY
        help
          This driver implements a functional majority of RFC 2734: IPv4 over
          1394. It will provide IP connectivity with implementations of RFC

@@ -134,6 +122,8 @@ config IEEE1394_ETH1394
          This driver is still considered experimental. It does not yet support
          MCAP, therefore multicast support is significantly limited.

          The module is called eth1394 although it does not emulate Ethernet.

config IEEE1394_DV1394
        tristate "OHCI-DV I/O support (deprecated)"
        depends on IEEE1394 && IEEE1394_OHCI1394

@@ -146,12 +136,12 @@ config IEEE1394_RAWIO
        tristate "Raw IEEE1394 I/O support"
        depends on IEEE1394
        help
          Say Y here if you want support for the raw device. This is generally
          a good idea, so you should say Y here. The raw device enables
          direct communication of user programs with the IEEE 1394 bus and
          thus with the attached peripherals.
          This option adds support for the raw1394 device file which enables
          direct communication of user programs with the IEEE 1394 bus and thus
          with the attached peripherals. Almost all application programs which
          access FireWire require this option.

          To compile this driver as a module, say M here: the
          module will be called raw1394.
          To compile this driver as a module, say M here: the module will be
          called raw1394.

endmenu
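The new IEEE1394_ETH1394_ROM_ENTRY symbol above is a hidden bool: it is never offered in the configuration menus and only becomes set through the select lines under IEEE1394_ETH1394. The only effect visible to C code is the CONFIG_IEEE1394_ETH1394_ROM_ENTRY preprocessor symbol, which is what config_roms.c now tests in the next file. A minimal, purely hypothetical sketch of that gate (nothing here is taken from the patch itself):

/* Hypothetical example: the hidden Kconfig bool is only visible to C code
 * as a preprocessor symbol, so the optional config-ROM support compiles
 * out cleanly when eth1394 is not selected. */
#include <linux/kernel.h>

static void report_ip1394_rom_entry(void)
{
#ifdef CONFIG_IEEE1394_ETH1394_ROM_ENTRY
        printk(KERN_INFO "IP-1394 config ROM entry support is built in\n");
#else
        printk(KERN_INFO "IP-1394 config ROM entry support is not built\n");
#endif
}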
@@ -26,12 +26,6 @@ struct hpsb_config_rom_entry {
        /* Base initialization, called at module load */
        int (*init)(void);

        /* Add entry to specified host */
        int (*add)(struct hpsb_host *host);

        /* Remove entry from specified host */
        void (*remove)(struct hpsb_host *host);

        /* Cleanup called at module exit */
        void (*cleanup)(void);

@@ -39,7 +33,7 @@ struct hpsb_config_rom_entry {
        unsigned int flag;
};

/* The default host entry. This must succeed. */
int hpsb_default_host_entry(struct hpsb_host *host)
{
        struct csr1212_keyval *root;

@@ -63,9 +57,9 @@ int hpsb_default_host_entry(struct hpsb_host *host)
                return -ENOMEM;
        }

        ret = csr1212_associate_keyval(vend_id, text);
        csr1212_associate_keyval(vend_id, text);
        csr1212_release_keyval(text);
        ret |= csr1212_attach_keyval_to_directory(root, vend_id);
        ret = csr1212_attach_keyval_to_directory(root, vend_id);
        csr1212_release_keyval(vend_id);
        if (ret != CSR1212_SUCCESS) {
                csr1212_destroy_csr(host->csr.rom);

@@ -78,7 +72,7 @@ int hpsb_default_host_entry(struct hpsb_host *host)
}

#ifdef CONFIG_IEEE1394_CONFIG_ROM_IP1394
#ifdef CONFIG_IEEE1394_ETH1394_ROM_ENTRY
#include "eth1394.h"

static struct csr1212_keyval *ip1394_ud;

@@ -103,10 +97,12 @@ static int config_rom_ip1394_init(void)
        if (!ip1394_ud || !spec_id || !spec_desc || !ver || !ver_desc)
                goto ip1394_fail;

        if (csr1212_associate_keyval(spec_id, spec_desc) == CSR1212_SUCCESS &&
            csr1212_associate_keyval(ver, ver_desc) == CSR1212_SUCCESS &&
            csr1212_attach_keyval_to_directory(ip1394_ud, spec_id) == CSR1212_SUCCESS &&
            csr1212_attach_keyval_to_directory(ip1394_ud, ver) == CSR1212_SUCCESS)
        csr1212_associate_keyval(spec_id, spec_desc);
        csr1212_associate_keyval(ver, ver_desc);
        if (csr1212_attach_keyval_to_directory(ip1394_ud, spec_id)
            == CSR1212_SUCCESS &&
            csr1212_attach_keyval_to_directory(ip1394_ud, ver)
            == CSR1212_SUCCESS)
                ret = 0;

ip1394_fail:

@@ -135,7 +131,7 @@ static void config_rom_ip1394_cleanup(void)
        }
}

static int config_rom_ip1394_add(struct hpsb_host *host)
int hpsb_config_rom_ip1394_add(struct hpsb_host *host)
{
        if (!ip1394_ud)
                return -ENODEV;

@@ -144,92 +140,55 @@ static int config_rom_ip1394_add(struct hpsb_host *host)
                 ip1394_ud) != CSR1212_SUCCESS)
                return -ENOMEM;

        host->config_roms |= HPSB_CONFIG_ROM_ENTRY_IP1394;
        host->update_config_rom = 1;
        return 0;
}
EXPORT_SYMBOL_GPL(hpsb_config_rom_ip1394_add);

static void config_rom_ip1394_remove(struct hpsb_host *host)
void hpsb_config_rom_ip1394_remove(struct hpsb_host *host)
{
        csr1212_detach_keyval_from_directory(host->csr.rom->root_kv, ip1394_ud);
        host->config_roms &= ~HPSB_CONFIG_ROM_ENTRY_IP1394;
        host->update_config_rom = 1;
}
EXPORT_SYMBOL_GPL(hpsb_config_rom_ip1394_remove);

static struct hpsb_config_rom_entry ip1394_entry = {
        .name = "ip1394",
        .init = config_rom_ip1394_init,
        .add = config_rom_ip1394_add,
        .remove = config_rom_ip1394_remove,
        .cleanup = config_rom_ip1394_cleanup,
        .flag = HPSB_CONFIG_ROM_ENTRY_IP1394,
};
#endif /* CONFIG_IEEE1394_CONFIG_ROM_IP1394 */

#endif /* CONFIG_IEEE1394_ETH1394_ROM_ENTRY */

static struct hpsb_config_rom_entry *const config_rom_entries[] = {
#ifdef CONFIG_IEEE1394_CONFIG_ROM_IP1394
#ifdef CONFIG_IEEE1394_ETH1394_ROM_ENTRY
        &ip1394_entry,
#endif
        NULL,
};

/* Initialize all config roms */
int hpsb_init_config_roms(void)
{
        int i, error = 0;

        for (i = 0; config_rom_entries[i]; i++) {
                if (!config_rom_entries[i]->init)
                        continue;

        for (i = 0; i < ARRAY_SIZE(config_rom_entries); i++)
                if (config_rom_entries[i]->init()) {
                        HPSB_ERR("Failed to initialize config rom entry `%s'",
                                 config_rom_entries[i]->name);
                        error = -1;
                } else
                        HPSB_DEBUG("Initialized config rom entry `%s'",
                                   config_rom_entries[i]->name);
        }
}

        return error;
}

/* Cleanup all config roms */
void hpsb_cleanup_config_roms(void)
{
        int i;

        for (i = 0; config_rom_entries[i]; i++) {
                if (config_rom_entries[i]->cleanup)
                        config_rom_entries[i]->cleanup();
        }
}

int hpsb_add_extra_config_roms(struct hpsb_host *host)
{
        int i, error = 0;

        for (i = 0; config_rom_entries[i]; i++) {
                if (config_rom_entries[i]->add(host)) {
                        HPSB_ERR("fw-host%d: Failed to attach config rom entry `%s'",
                                 host->id, config_rom_entries[i]->name);
                        error = -1;
                } else {
                        host->config_roms |= config_rom_entries[i]->flag;
                        host->update_config_rom = 1;
                }
        }

        return error;
}

void hpsb_remove_extra_config_roms(struct hpsb_host *host)
{
        int i;

        for (i = 0; config_rom_entries[i]; i++) {
                if (!(host->config_roms & config_rom_entries[i]->flag))
                        continue;

                config_rom_entries[i]->remove(host);

                host->config_roms &= ~config_rom_entries[i]->flag;
                host->update_config_rom = 1;
        }
        for (i = 0; i < ARRAY_SIZE(config_rom_entries); i++)
                config_rom_entries[i]->cleanup();
}
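The hunks above also show the changed csr1212 association API: csr1212_associate_keyval() no longer returns a status, so hpsb_default_host_entry() now checks only csr1212_attach_keyval_to_directory(). A hedged sketch of that calling pattern for code that builds its own directory entries; the helper name and its surroundings are hypothetical, only the csr1212 calls come from the code above:

#include <linux/errno.h>
#include "csr1212.h"

/* Associate a textual descriptor with a keyval and attach the keyval to a
 * directory; only the attach step can fail, mirroring the code above. */
static int attach_with_text(struct csr1212_keyval *dir,
                            struct csr1212_keyval *kv,
                            struct csr1212_keyval *text)
{
        int ret;

        csr1212_associate_keyval(kv, text);     /* void since this merge */
        csr1212_release_keyval(text);           /* association holds its own reference */

        ret = csr1212_attach_keyval_to_directory(dir, kv);
        csr1212_release_keyval(kv);

        return ret == CSR1212_SUCCESS ? 0 : -ENOMEM;
}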
@@ -1,27 +1,19 @@
#ifndef _IEEE1394_CONFIG_ROMS_H
#define _IEEE1394_CONFIG_ROMS_H

#include "ieee1394_types.h"
#include "hosts.h"
struct hpsb_host;

/* The default host entry. This must succeed. */
int hpsb_default_host_entry(struct hpsb_host *host);

/* Initialize all config roms */
int hpsb_init_config_roms(void);

/* Cleanup all config roms */
void hpsb_cleanup_config_roms(void);

/* Add extra config roms to specified host */
int hpsb_add_extra_config_roms(struct hpsb_host *host);

/* Remove extra config roms from specified host */
void hpsb_remove_extra_config_roms(struct hpsb_host *host);

/* List of flags to check if a host contains a certain extra config rom
 * entry. Available in the host->config_roms member. */
#define HPSB_CONFIG_ROM_ENTRY_IP1394 0x00000001

#ifdef CONFIG_IEEE1394_ETH1394_ROM_ENTRY
int hpsb_config_rom_ip1394_add(struct hpsb_host *host);
void hpsb_config_rom_ip1394_remove(struct hpsb_host *host);
#endif

#endif /* _IEEE1394_CONFIG_ROMS_H */
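With hpsb_config_rom_ip1394_add()/_remove() exported above, a protocol driver can manage the IP-1394 unit directory directly instead of going through the old config_rom_entries[] table. A rough sketch of a caller; the per-host callbacks and the HPSB_ERR logging macro are assumptions about the surrounding ieee1394 core, not part of this patch:

#include "config_roms.h"
#include "ieee1394_core.h"      /* HPSB_ERR(), assumed to be available here */

static void example_host_added(struct hpsb_host *host)
{
        if (hpsb_config_rom_ip1394_add(host) != 0)
                HPSB_ERR("example: cannot add IP-1394 unit directory");
}

static void example_host_removed(struct hpsb_host *host)
{
        hpsb_config_rom_ip1394_remove(host);
}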
File diff suppressed because it is too large
@ -30,94 +30,13 @@
|
|||
#ifndef __CSR1212_H__
|
||||
#define __CSR1212_H__
|
||||
|
||||
|
||||
/* Compatibility layer */
|
||||
#ifdef __KERNEL__
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <asm/pgalloc.h>
|
||||
|
||||
#define CSR1212_MALLOC(size) vmalloc((size))
|
||||
#define CSR1212_FREE(ptr) vfree(ptr)
|
||||
#define CSR1212_BE16_TO_CPU(quad) be16_to_cpu(quad)
|
||||
#define CSR1212_CPU_TO_BE16(quad) cpu_to_be16(quad)
|
||||
#define CSR1212_BE32_TO_CPU(quad) be32_to_cpu(quad)
|
||||
#define CSR1212_CPU_TO_BE32(quad) cpu_to_be32(quad)
|
||||
#define CSR1212_BE64_TO_CPU(quad) be64_to_cpu(quad)
|
||||
#define CSR1212_CPU_TO_BE64(quad) cpu_to_be64(quad)
|
||||
#define CSR1212_MALLOC(size) kmalloc((size), GFP_KERNEL)
|
||||
#define CSR1212_FREE(ptr) kfree(ptr)
|
||||
|
||||
#define CSR1212_LE16_TO_CPU(quad) le16_to_cpu(quad)
|
||||
#define CSR1212_CPU_TO_LE16(quad) cpu_to_le16(quad)
|
||||
#define CSR1212_LE32_TO_CPU(quad) le32_to_cpu(quad)
|
||||
#define CSR1212_CPU_TO_LE32(quad) cpu_to_le32(quad)
|
||||
#define CSR1212_LE64_TO_CPU(quad) le64_to_cpu(quad)
|
||||
#define CSR1212_CPU_TO_LE64(quad) cpu_to_le64(quad)
|
||||
|
||||
#include <linux/errno.h>
|
||||
#define CSR1212_SUCCESS (0)
|
||||
#define CSR1212_EINVAL (-EINVAL)
|
||||
#define CSR1212_ENOMEM (-ENOMEM)
|
||||
#define CSR1212_ENOENT (-ENOENT)
|
||||
#define CSR1212_EIO (-EIO)
|
||||
#define CSR1212_EBUSY (-EBUSY)
|
||||
|
||||
#else /* Userspace */
|
||||
|
||||
#include <sys/types.h>
|
||||
#include <malloc.h>
|
||||
#define CSR1212_MALLOC(size) malloc(size)
|
||||
#define CSR1212_FREE(ptr) free(ptr)
|
||||
#include <endian.h>
|
||||
#if __BYTE_ORDER == __LITTLE_ENDIAN
|
||||
#include <byteswap.h>
|
||||
#define CSR1212_BE16_TO_CPU(quad) bswap_16(quad)
|
||||
#define CSR1212_CPU_TO_BE16(quad) bswap_16(quad)
|
||||
#define CSR1212_BE32_TO_CPU(quad) bswap_32(quad)
|
||||
#define CSR1212_CPU_TO_BE32(quad) bswap_32(quad)
|
||||
#define CSR1212_BE64_TO_CPU(quad) bswap_64(quad)
|
||||
#define CSR1212_CPU_TO_BE64(quad) bswap_64(quad)
|
||||
|
||||
#define CSR1212_LE16_TO_CPU(quad) (quad)
|
||||
#define CSR1212_CPU_TO_LE16(quad) (quad)
|
||||
#define CSR1212_LE32_TO_CPU(quad) (quad)
|
||||
#define CSR1212_CPU_TO_LE32(quad) (quad)
|
||||
#define CSR1212_LE64_TO_CPU(quad) (quad)
|
||||
#define CSR1212_CPU_TO_LE64(quad) (quad)
|
||||
#else
|
||||
#define CSR1212_BE16_TO_CPU(quad) (quad)
|
||||
#define CSR1212_CPU_TO_BE16(quad) (quad)
|
||||
#define CSR1212_BE32_TO_CPU(quad) (quad)
|
||||
#define CSR1212_CPU_TO_BE32(quad) (quad)
|
||||
#define CSR1212_BE64_TO_CPU(quad) (quad)
|
||||
#define CSR1212_CPU_TO_BE64(quad) (quad)
|
||||
|
||||
#define CSR1212_LE16_TO_CPU(quad) bswap_16(quad)
|
||||
#define CSR1212_CPU_TO_LE16(quad) bswap_16(quad)
|
||||
#define CSR1212_LE32_TO_CPU(quad) bswap_32(quad)
|
||||
#define CSR1212_CPU_TO_LE32(quad) bswap_32(quad)
|
||||
#define CSR1212_LE64_TO_CPU(quad) bswap_64(quad)
|
||||
#define CSR1212_CPU_TO_LE64(quad) bswap_64(quad)
|
||||
#endif
|
||||
|
||||
#include <errno.h>
|
||||
#define CSR1212_SUCCESS (0)
|
||||
#define CSR1212_EINVAL (EINVAL)
|
||||
#define CSR1212_ENOMEM (ENOMEM)
|
||||
#define CSR1212_ENOENT (ENOENT)
|
||||
#define CSR1212_EIO (EIO)
|
||||
#define CSR1212_EBUSY (EBUSY)
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
#define CSR1212_KV_VAL_MASK 0xffffff
|
||||
#define CSR1212_KV_KEY_SHIFT 24
|
||||
#define CSR1212_KV_KEY_TYPE_SHIFT 6
|
||||
#define CSR1212_KV_KEY_ID_MASK 0x3f
|
||||
#define CSR1212_KV_KEY_TYPE_MASK 0x3 /* After shift */
|
||||
|
||||
|
||||
/* CSR 1212 key types */
|
||||
|
@ -190,48 +109,22 @@
|
|||
#define CSR1212_UNITS_SPACE_END (CSR1212_UNITS_SPACE_BASE + CSR1212_UNITS_SPACE_SIZE)
|
||||
#define CSR1212_UNITS_SPACE_OFFSET (CSR1212_UNITS_SPACE_BASE - CSR1212_REGISTER_SPACE_BASE)
|
||||
|
||||
#define CSR1212_EXTENDED_ROM_SIZE (0x10000 * sizeof(u_int32_t))
|
||||
|
||||
#define CSR1212_INVALID_ADDR_SPACE -1
|
||||
|
||||
|
||||
/* Config ROM image structures */
|
||||
struct csr1212_bus_info_block_img {
|
||||
u_int8_t length;
|
||||
u_int8_t crc_length;
|
||||
u_int16_t crc;
|
||||
u8 length;
|
||||
u8 crc_length;
|
||||
u16 crc;
|
||||
|
||||
/* Must be last */
|
||||
u_int32_t data[0]; /* older gcc can't handle [] which is standard */
|
||||
};
|
||||
|
||||
#define CSR1212_KV_KEY(quad) (CSR1212_BE32_TO_CPU(quad) >> CSR1212_KV_KEY_SHIFT)
|
||||
#define CSR1212_KV_KEY_TYPE(quad) (CSR1212_KV_KEY(quad) >> CSR1212_KV_KEY_TYPE_SHIFT)
|
||||
#define CSR1212_KV_KEY_ID(quad) (CSR1212_KV_KEY(quad) & CSR1212_KV_KEY_ID_MASK)
|
||||
#define CSR1212_KV_VAL(quad) (CSR1212_BE32_TO_CPU(quad) & CSR1212_KV_VAL_MASK)
|
||||
|
||||
#define CSR1212_SET_KV_KEY(quad, key) ((quad) = \
|
||||
CSR1212_CPU_TO_BE32(CSR1212_KV_VAL(quad) | ((key) << CSR1212_KV_KEY_SHIFT)))
|
||||
#define CSR1212_SET_KV_VAL(quad, val) ((quad) = \
|
||||
CSR1212_CPU_TO_BE32((CSR1212_KV_KEY(quad) << CSR1212_KV_KEY_SHIFT) | (val)))
|
||||
#define CSR1212_SET_KV_TYPEID(quad, type, id) ((quad) = \
|
||||
CSR1212_CPU_TO_BE32(CSR1212_KV_VAL(quad) | \
|
||||
(((((type) & CSR1212_KV_KEY_TYPE_MASK) << CSR1212_KV_KEY_TYPE_SHIFT) | \
|
||||
((id) & CSR1212_KV_KEY_ID_MASK)) << CSR1212_KV_KEY_SHIFT)))
|
||||
|
||||
typedef u_int32_t csr1212_quad_t;
|
||||
|
||||
|
||||
struct csr1212_keyval_img {
|
||||
u_int16_t length;
|
||||
u_int16_t crc;
|
||||
|
||||
/* Must be last */
|
||||
csr1212_quad_t data[0]; /* older gcc can't handle [] which is standard */
|
||||
u32 data[0]; /* older gcc can't handle [] which is standard */
|
||||
};
|
||||
|
||||
struct csr1212_leaf {
|
||||
int len;
|
||||
u_int32_t *data;
|
||||
u32 *data;
|
||||
};
|
||||
|
||||
struct csr1212_dentry {
|
||||
|
@ -246,12 +139,12 @@ struct csr1212_directory {
|
|||
|
||||
struct csr1212_keyval {
|
||||
struct {
|
||||
u_int8_t type;
|
||||
u_int8_t id;
|
||||
u8 type;
|
||||
u8 id;
|
||||
} key;
|
||||
union {
|
||||
u_int32_t immediate;
|
||||
u_int32_t csr_offset;
|
||||
u32 immediate;
|
||||
u32 csr_offset;
|
||||
struct csr1212_leaf leaf;
|
||||
struct csr1212_directory directory;
|
||||
} value;
|
||||
|
@ -260,15 +153,15 @@ struct csr1212_keyval {
|
|||
|
||||
/* used in generating and/or parsing CSR image */
|
||||
struct csr1212_keyval *next, *prev; /* flat list of CSR elements */
|
||||
u_int32_t offset; /* position in CSR from 0xffff f000 0000 */
|
||||
u_int8_t valid; /* flag indicating keyval has valid data*/
|
||||
u32 offset; /* position in CSR from 0xffff f000 0000 */
|
||||
u8 valid; /* flag indicating keyval has valid data*/
|
||||
};
|
||||
|
||||
|
||||
struct csr1212_cache_region {
|
||||
struct csr1212_cache_region *next, *prev;
|
||||
u_int32_t offset_start; /* inclusive */
|
||||
u_int32_t offset_end; /* exclusive */
|
||||
u32 offset_start; /* inclusive */
|
||||
u32 offset_end; /* exclusive */
|
||||
};
|
||||
|
||||
struct csr1212_csr_rom_cache {
|
||||
|
@ -276,18 +169,18 @@ struct csr1212_csr_rom_cache {
|
|||
struct csr1212_cache_region *filled_head, *filled_tail;
|
||||
struct csr1212_keyval *layout_head, *layout_tail;
|
||||
size_t size;
|
||||
u_int32_t offset;
|
||||
u32 offset;
|
||||
struct csr1212_keyval *ext_rom;
|
||||
size_t len;
|
||||
|
||||
/* Must be last */
|
||||
u_int32_t data[0]; /* older gcc can't handle [] which is standard */
|
||||
u32 data[0]; /* older gcc can't handle [] which is standard */
|
||||
};
|
||||
|
||||
struct csr1212_csr {
|
||||
size_t bus_info_len; /* bus info block length in bytes */
|
||||
size_t crc_len; /* crc length in bytes */
|
||||
u_int32_t *bus_info_data; /* bus info data incl bus name and EUI */
|
||||
u32 *bus_info_data; /* bus info data incl bus name and EUI */
|
||||
|
||||
void *private; /* private, bus specific data */
|
||||
struct csr1212_bus_ops *ops;
|
||||
|
@ -305,52 +198,38 @@ struct csr1212_bus_ops {
|
|||
* from remote nodes when parsing a Config ROM (i.e., read Config ROM
|
||||
* entries located in the Units Space. Must return 0 on success
|
||||
* anything else indicates an error. */
|
||||
int (*bus_read) (struct csr1212_csr *csr, u_int64_t addr,
|
||||
u_int16_t length, void *buffer, void *private);
|
||||
int (*bus_read) (struct csr1212_csr *csr, u64 addr,
|
||||
u16 length, void *buffer, void *private);
|
||||
|
||||
/* This function is used by csr1212 to allocate a region in units space
|
||||
* in the event that Config ROM entries don't all fit in the predefined
|
||||
* 1K region. The void *private parameter is private member of struct
|
||||
* csr1212_csr. */
|
||||
u_int64_t (*allocate_addr_range) (u_int64_t size, u_int32_t alignment,
|
||||
void *private);
|
||||
|
||||
u64 (*allocate_addr_range) (u64 size, u32 alignment, void *private);
|
||||
|
||||
/* This function is used by csr1212 to release a region in units space
|
||||
* that is no longer needed. */
|
||||
void (*release_addr) (u_int64_t addr, void *private);
|
||||
void (*release_addr) (u64 addr, void *private);
|
||||
|
||||
/* This function is used by csr1212 to determine the max read request
|
||||
* supported by a remote node when reading the ConfigROM space. Must
|
||||
* return 0, 1, or 2 per IEEE 1212. */
|
||||
int (*get_max_rom) (u_int32_t *bus_info, void *private);
|
||||
int (*get_max_rom) (u32 *bus_info, void *private);
|
||||
};
|
||||
|
||||
|
||||
|
||||
|
||||
/* Descriptor Leaf manipulation macros */
|
||||
#define CSR1212_DESCRIPTOR_LEAF_TYPE_SHIFT 24
|
||||
#define CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID_MASK 0xffffff
|
||||
#define CSR1212_DESCRIPTOR_LEAF_OVERHEAD (1 * sizeof(u_int32_t))
|
||||
#define CSR1212_DESCRIPTOR_LEAF_OVERHEAD (1 * sizeof(u32))
|
||||
|
||||
#define CSR1212_DESCRIPTOR_LEAF_TYPE(kv) \
|
||||
(CSR1212_BE32_TO_CPU((kv)->value.leaf.data[0]) >> CSR1212_DESCRIPTOR_LEAF_TYPE_SHIFT)
|
||||
(be32_to_cpu((kv)->value.leaf.data[0]) >> \
|
||||
CSR1212_DESCRIPTOR_LEAF_TYPE_SHIFT)
|
||||
#define CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID(kv) \
|
||||
(CSR1212_BE32_TO_CPU((kv)->value.leaf.data[0]) & \
|
||||
(be32_to_cpu((kv)->value.leaf.data[0]) & \
|
||||
CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID_MASK)
|
||||
#define CSR1212_DESCRIPTOR_LEAF_DATA(kv) \
|
||||
(&((kv)->value.leaf.data[1]))
|
||||
|
||||
#define CSR1212_DESCRIPTOR_LEAF_SET_TYPE(kv, type) \
|
||||
((kv)->value.leaf.data[0] = \
|
||||
CSR1212_CPU_TO_BE32(CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID(kv) | \
|
||||
((type) << CSR1212_DESCRIPTOR_LEAF_TYPE_SHIFT)))
|
||||
#define CSR1212_DESCRIPTOR_LEAF_SET_SPECIFIER_ID(kv, spec_id) \
|
||||
((kv)->value.leaf.data[0] = \
|
||||
CSR1212_CPU_TO_BE32((CSR1212_DESCRIPTOR_LEAF_TYPE(kv) << \
|
||||
CSR1212_DESCRIPTOR_LEAF_TYPE_SHIFT) | \
|
||||
((spec_id) & CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID_MASK)))
|
||||
|
||||
/* Text Descriptor Leaf manipulation macros */
|
||||
#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH_SHIFT 28
|
||||
|
@ -358,182 +237,21 @@ struct csr1212_bus_ops {
|
|||
#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_SHIFT 16
|
||||
#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_MASK 0xfff /* after shift */
|
||||
#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_LANGUAGE_MASK 0xffff
|
||||
#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_OVERHEAD (1 * sizeof(u_int32_t))
|
||||
#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_OVERHEAD (1 * sizeof(u32))
|
||||
|
||||
#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH(kv) \
|
||||
(CSR1212_BE32_TO_CPU((kv)->value.leaf.data[1]) >> \
|
||||
(be32_to_cpu((kv)->value.leaf.data[1]) >> \
|
||||
CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH_SHIFT)
|
||||
#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET(kv) \
|
||||
((CSR1212_BE32_TO_CPU((kv)->value.leaf.data[1]) >> \
|
||||
CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_SHIFT) & \
|
||||
CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_MASK)
|
||||
((be32_to_cpu((kv)->value.leaf.data[1]) >> \
|
||||
CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_SHIFT) & \
|
||||
CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_MASK)
|
||||
#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_LANGUAGE(kv) \
|
||||
(CSR1212_BE32_TO_CPU((kv)->value.leaf.data[1]) & \
|
||||
(be32_to_cpu((kv)->value.leaf.data[1]) & \
|
||||
CSR1212_TEXTUAL_DESCRIPTOR_LEAF_LANGUAGE_MASK)
|
||||
#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_DATA(kv) \
|
||||
(&((kv)->value.leaf.data[2]))
|
||||
|
||||
#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_WIDTH(kv, width) \
|
||||
((kv)->value.leaf.data[1] = \
|
||||
((kv)->value.leaf.data[1] & \
|
||||
CSR1212_CPU_TO_BE32(~(CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH_MASK << \
|
||||
CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH_SHIFT))) | \
|
||||
CSR1212_CPU_TO_BE32(((width) & \
|
||||
CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH_MASK) << \
|
||||
CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH_SHIFT))
|
||||
#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_CHAR_SET(kv, char_set) \
|
||||
((kv)->value.leaf.data[1] = \
|
||||
((kv)->value.leaf.data[1] & \
|
||||
CSR1212_CPU_TO_BE32(~(CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_MASK << \
|
||||
CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_SHIFT))) | \
|
||||
CSR1212_CPU_TO_BE32(((char_set) & \
|
||||
CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_MASK) << \
|
||||
CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_SHIFT))
|
||||
#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_LANGUAGE(kv, language) \
|
||||
((kv)->value.leaf.data[1] = \
|
||||
((kv)->value.leaf.data[1] & \
|
||||
CSR1212_CPU_TO_BE32(~(CSR1212_TEXTUAL_DESCRIPTOR_LEAF_LANGUAGE_MASK))) | \
|
||||
CSR1212_CPU_TO_BE32(((language) & \
|
||||
CSR1212_TEXTUAL_DESCRIPTOR_LEAF_LANGUAGE_MASK)))
|
||||
|
||||
|
||||
/* Icon Descriptor Leaf manipulation macros */
|
||||
#define CSR1212_ICON_DESCRIPTOR_LEAF_VERSION_MASK 0xffffff
|
||||
#define CSR1212_ICON_DESCRIPTOR_LEAF_PALETTE_DEPTH_SHIFT 30
|
||||
#define CSR1212_ICON_DESCRIPTOR_LEAF_PALETTE_DEPTH_MASK 0x3 /* after shift */
|
||||
#define CSR1212_ICON_DESCRIPTOR_LEAF_COLOR_SPACE_SHIFT 16
|
||||
#define CSR1212_ICON_DESCRIPTOR_LEAF_COLOR_SPACE_MASK 0xf /* after shift */
|
||||
#define CSR1212_ICON_DESCRIPTOR_LEAF_LANGUAGE_MASK 0xffff
|
||||
#define CSR1212_ICON_DESCRIPTOR_LEAF_HSCAN_SHIFT 16
|
||||
#define CSR1212_ICON_DESCRIPTOR_LEAF_HSCAN_MASK 0xffff /* after shift */
|
||||
#define CSR1212_ICON_DESCRIPTOR_LEAF_VSCAN_MASK 0xffff
|
||||
#define CSR1212_ICON_DESCRIPTOR_LEAF_OVERHEAD (3 * sizeof(u_int32_t))
|
||||
|
||||
#define CSR1212_ICON_DESCRIPTOR_LEAF_VERSION(kv) \
|
||||
(CSR1212_BE32_TO_CPU((kv)->value.leaf.data[2]) & \
|
||||
CSR1212_ICON_DESCRIPTOR_LEAF_VERSION_MASK)
|
||||
|
||||
#define CSR1212_ICON_DESCRIPTOR_LEAF_PALETTE_DEPTH(kv) \
|
||||
(CSR1212_BE32_TO_CPU((kv)->value.leaf.data[3]) >> \
|
||||
CSR1212_ICON_DESCRIPTOR_LEAF_PALETTE_DEPTH_SHIFT)
|
||||
|
||||
#define CSR1212_ICON_DESCRIPTOR_LEAF_COLOR_SPACE(kv) \
|
||||
((CSR1212_BE32_TO_CPU((kv)->value.leaf.data[3]) >> \
|
||||
CSR1212_ICON_DESCRIPTOR_LEAF_COLOR_SPACE_SHIFT) & \
|
||||
CSR1212_ICON_DESCRIPTOR_LEAF_COLOR_SPACE_MASK)
|
||||
|
||||
#define CSR1212_ICON_DESCRIPTOR_LEAF_LANGUAGE(kv) \
|
||||
(CSR1212_BE32_TO_CPU((kv)->value.leaf.data[3]) & \
|
||||
CSR1212_ICON_DESCRIPTOR_LEAF_LANGUAGE_MASK)
|
||||
|
||||
#define CSR1212_ICON_DESCRIPTOR_LEAF_HSCAN(kv) \
|
||||
((CSR1212_BE32_TO_CPU((kv)->value.leaf.data[4]) >> \
|
||||
CSR1212_ICON_DESCRIPTOR_LEAF_COLOR_HSCAN_SHIFT) & \
|
||||
CSR1212_ICON_DESCRIPTOR_LEAF_COLOR_HSCAN_MASK)
|
||||
|
||||
#define CSR1212_ICON_DESCRIPTOR_LEAF_VSCAN(kv) \
|
||||
(CSR1212_BE32_TO_CPU((kv)->value.leaf.data[4]) & \
|
||||
CSR1212_ICON_DESCRIPTOR_LEAF_VSCAN_MASK)
|
||||
|
||||
#define CSR1212_ICON_DESCRIPTOR_LEAF_PALETTE(kv) \
|
||||
(&((kv)->value.leaf.data[5]))
|
||||
|
||||
static inline u_int32_t *CSR1212_ICON_DESCRIPTOR_LEAF_PIXELS(struct csr1212_keyval *kv)
|
||||
{
|
||||
static const int pd[4] = { 0, 4, 16, 256 };
|
||||
static const int cs[16] = { 4, 2 };
|
||||
int ps = pd[CSR1212_ICON_DESCRIPTOR_LEAF_PALETTE_DEPTH(kv)];
|
||||
|
||||
return &kv->value.leaf.data[5 +
|
||||
(ps * cs[CSR1212_ICON_DESCRIPTOR_LEAF_COLOR_SPACE(kv)]) /
|
||||
sizeof(u_int32_t)];
|
||||
}
|
||||
|
||||
#define CSR1212_ICON_DESCRIPTOR_LEAF_SET_VERSION(kv, version) \
|
||||
((kv)->value.leaf.data[2] = \
|
||||
((kv)->value.leaf.data[2] & \
|
||||
CSR1212_CPU_TO_BE32(~(CSR1212_ICON_DESCRIPTOR_LEAF_VERSION_MASK))) | \
|
||||
CSR1212_CPU_TO_BE32(((version) & \
|
||||
CSR1212_ICON_DESCRIPTOR_LEAF_VERSION_MASK)))
|
||||
|
||||
#define CSR1212_ICON_DESCRIPTOR_LEAF_SET_PALETTE_DEPTH(kv, palette_depth) \
|
||||
((kv)->value.leaf.data[3] = \
|
||||
((kv)->value.leaf.data[3] & \
|
||||
CSR1212_CPU_TO_BE32(~(CSR1212_ICON_DESCRIPTOR_LEAF_PALETTE_DEPTH_MASK << \
|
||||
CSR1212_ICON_DESCRIPTOR_LEAF_PALETTE_DEPTH_SHIFT))) | \
|
||||
CSR1212_CPU_TO_BE32(((palette_depth) & \
|
||||
CSR1212_ICON_DESCRIPTOR_LEAF_PALETTE_DEPTH_MASK) << \
|
||||
CSR1212_ICON_DESCRIPTOR_LEAF_PALETTE_DEPTH_SHIFT))
|
||||
|
||||
#define CSR1212_ICON_DESCRIPTOR_LEAF_SET_COLOR_SPACE(kv, color_space) \
|
||||
((kv)->value.leaf.data[3] = \
|
||||
((kv)->value.leaf.data[3] & \
|
||||
CSR1212_CPU_TO_BE32(~(CSR1212_ICON_DESCRIPTOR_LEAF_COLOR_SPACE_MASK << \
|
||||
CSR1212_ICON_DESCRIPTOR_LEAF_COLOR_SPACE_SHIFT))) | \
|
||||
CSR1212_CPU_TO_BE32(((color_space) & \
|
||||
CSR1212_ICON_DESCRIPTOR_LEAF_COLOR_SPACE_MASK) << \
|
||||
CSR1212_ICON_DESCRIPTOR_LEAF_COLOR_SPACE_SHIFT))
|
||||
|
||||
#define CSR1212_ICON_DESCRIPTOR_LEAF_SET_LANGUAGE(kv, language) \
|
||||
((kv)->value.leaf.data[3] = \
|
||||
((kv)->value.leaf.data[3] & \
|
||||
CSR1212_CPU_TO_BE32(~(CSR1212_ICON_DESCRIPTOR_LEAF_LANGUAGE_MASK))) | \
|
||||
CSR1212_CPU_TO_BE32(((language) & \
|
||||
CSR1212_ICON_DESCRIPTOR_LEAF_LANGUAGE_MASK)))
|
||||
|
||||
#define CSR1212_ICON_DESCRIPTOR_LEAF_SET_HSCAN(kv, hscan) \
|
||||
((kv)->value.leaf.data[4] = \
|
||||
((kv)->value.leaf.data[4] & \
|
||||
CSR1212_CPU_TO_BE32(~(CSR1212_ICON_DESCRIPTOR_LEAF_HSCAN_MASK << \
|
||||
CSR1212_ICON_DESCRIPTOR_LEAF_HSCAN_SHIFT))) | \
|
||||
CSR1212_CPU_TO_BE32(((hscan) & \
|
||||
CSR1212_ICON_DESCRIPTOR_LEAF_HSCAN_MASK) << \
|
||||
CSR1212_ICON_DESCRIPTOR_LEAF_HSCAN_SHIFT))
|
||||
|
||||
#define CSR1212_ICON_DESCRIPTOR_LEAF_SET_VSCAN(kv, vscan) \
|
||||
((kv)->value.leaf.data[4] = \
|
||||
(((kv)->value.leaf.data[4] & \
|
||||
CSR1212_CPU_TO_BE32(~CSR1212_ICON_DESCRIPTOR_LEAF_VSCAN_MASK))) | \
|
||||
CSR1212_CPU_TO_BE32(((vscan) & \
|
||||
CSR1212_ICON_DESCRIPTOR_LEAF_VSCAN_MASK)))
|
||||
|
||||
|
||||
/* Modifiable Descriptor Leaf manipulation macros */
|
||||
#define CSR1212_MODIFIABLE_DESCRIPTOR_LEAF_MAX_SIZE_SHIFT 16
|
||||
#define CSR1212_MODIFIABLE_DESCRIPTOR_LEAF_MAX_SIZE_MASK 0xffff
|
||||
#define CSR1212_MODIFIABLE_DESCRIPTOR_LEAF_ADDR_HI_SHIFT 32
|
||||
#define CSR1212_MODIFIABLE_DESCRIPTOR_LEAF_ADDR_HI_MASK 0xffff
|
||||
#define CSR1212_MODIFIABLE_DESCRIPTOR_LEAF_ADDR_LO_MASK 0xffffffffULL
|
||||
|
||||
#define CSR1212_MODIFIABLE_DESCRIPTOR_MAX_SIZE(kv) \
|
||||
CSR1212_BE16_TO_CPU((kv)->value.leaf.data[0] >> CSR1212_MODIFIABLE_DESCRIPTOR_MAX_SIZE_SHIFT)
|
||||
|
||||
#define CSR1212_MODIFIABLE_DESCRIPTOR_ADDRESS(kv) \
|
||||
(CSR1212_BE16_TO_CPU(((u_int64_t)((kv)->value.leaf.data[0])) << \
|
||||
CSR1212_MODIFIABLE_DESCRIPTOR_ADDR_HI_SHIFT) | \
|
||||
CSR1212_BE32_TO_CPU((kv)->value.leaf.data[1]))
|
||||
|
||||
#define CSR1212_MODIFIABLE_DESCRIPTOR_SET_MAX_SIZE(kv, size) \
|
||||
((kv)->value.leaf.data[0] = \
|
||||
((kv)->value.leaf.data[0] & \
|
||||
CSR1212_CPU_TO_BE32(~(CSR1212_MODIFIABLE_DESCRIPTOR_LEAF_MAX_SIZE_MASK << \
|
||||
CSR1212_MODIFIABLE_DESCRIPTOR_LEAF_MAX_SIZE_SHIFT))) | \
|
||||
CSR1212_CPU_TO_BE32(((size) & \
|
||||
CSR1212_MODIFIABLE_DESCRIPTOR_LEAF_MAX_SIZE_MASK) << \
|
||||
CSR1212_MODIFIABLE_DESCRIPTOR_LEAF_MAX_SIZE_SHIFT))
|
||||
|
||||
#define CSR1212_MODIFIABLE_DESCRIPTOR_SET_ADDRESS_HI(kv, addr) \
|
||||
((kv)->value.leaf.data[0] = \
|
||||
((kv)->value.leaf.data[0] & \
|
||||
CSR1212_CPU_TO_BE32(~(CSR1212_MODIFIABLE_DESCRIPTOR_LEAF_ADDR_HI_MASK))) | \
|
||||
CSR1212_CPU_TO_BE32(((addr) & \
|
||||
CSR1212_MODIFIABLE_DESCRIPTOR_LEAF_ADDR_HI_MASK)))
|
||||
|
||||
#define CSR1212_MODIFIABLE_DESCRIPTOR_SET_ADDRESS_LO(kv, addr) \
|
||||
((kv)->value.leaf.data[1] = \
|
||||
CSR1212_CPU_TO_BE32(addr & CSR1212_MODIFIABLE_DESCRIPTOR_LEAF_ADDR_LO_MASK))
|
||||
|
||||
|
||||
|
||||
/* The following 2 function are for creating new Configuration ROM trees. The
|
||||
* first function is used for both creating local trees and parsing remote
|
||||
|
@ -543,11 +261,10 @@ extern struct csr1212_csr *csr1212_create_csr(struct csr1212_bus_ops *ops,
|
|||
size_t bus_info_size,
|
||||
void *private);
|
||||
extern void csr1212_init_local_csr(struct csr1212_csr *csr,
|
||||
const u_int32_t *bus_info_data, int max_rom);
|
||||
const u32 *bus_info_data, int max_rom);
|
||||
|
||||
|
||||
/* The following function destroys a Configuration ROM tree and release all
|
||||
* memory taken by the tree. */
|
||||
/* Destroy a Configuration ROM tree and release all memory taken by the tree. */
|
||||
extern void csr1212_destroy_csr(struct csr1212_csr *csr);
|
||||
|
||||
|
||||
|
@ -555,50 +272,20 @@ extern void csr1212_destroy_csr(struct csr1212_csr *csr);
|
|||
* a Configuration ROM tree. Code that creates new keyvals with these functions
|
||||
* must release those keyvals with csr1212_release_keyval() when they are no
|
||||
* longer needed. */
|
||||
extern struct csr1212_keyval *csr1212_new_immediate(u_int8_t key, u_int32_t value);
|
||||
extern struct csr1212_keyval *csr1212_new_leaf(u_int8_t key, const void *data,
|
||||
size_t data_len);
|
||||
extern struct csr1212_keyval *csr1212_new_csr_offset(u_int8_t key,
|
||||
u_int32_t csr_offset);
|
||||
extern struct csr1212_keyval *csr1212_new_directory(u_int8_t key);
|
||||
extern struct csr1212_keyval *csr1212_new_extended_immediate(u_int32_t spec,
|
||||
u_int32_t key,
|
||||
u_int32_t value);
|
||||
extern struct csr1212_keyval *csr1212_new_extended_leaf(u_int32_t spec,
|
||||
u_int32_t key,
|
||||
const void *data,
|
||||
size_t data_len);
|
||||
extern struct csr1212_keyval *csr1212_new_descriptor_leaf(u_int8_t dtype,
|
||||
u_int32_t specifier_id,
|
||||
const void *data,
|
||||
size_t data_len);
|
||||
extern struct csr1212_keyval *csr1212_new_textual_descriptor_leaf(u_int8_t cwidth,
|
||||
u_int16_t cset,
|
||||
u_int16_t language,
|
||||
const void *data,
|
||||
size_t data_len);
|
||||
extern struct csr1212_keyval *csr1212_new_immediate(u8 key, u32 value);
|
||||
extern struct csr1212_keyval *csr1212_new_directory(u8 key);
|
||||
extern struct csr1212_keyval *csr1212_new_string_descriptor_leaf(const char *s);
|
||||
extern struct csr1212_keyval *csr1212_new_icon_descriptor_leaf(u_int32_t version,
|
||||
u_int8_t palette_depth,
|
||||
u_int8_t color_space,
|
||||
u_int16_t language,
|
||||
u_int16_t hscan,
|
||||
u_int16_t vscan,
|
||||
u_int32_t *palette,
|
||||
u_int32_t *pixels);
|
||||
extern struct csr1212_keyval *csr1212_new_modifiable_descriptor_leaf(u_int16_t max_size,
|
||||
u_int64_t address);
|
||||
extern struct csr1212_keyval *csr1212_new_keyword_leaf(int strc,
|
||||
const char *strv[]);
|
||||
|
||||
|
||||
/* The following functions manage association between keyvals. Typically,
|
||||
/* The following function manages association between keyvals. Typically,
|
||||
* Descriptor Leaves and Directories will be associated with another keyval and
|
||||
* it is desirable for the Descriptor keyval to be placed immediately after the
|
||||
* keyval that it is associated with.*/
|
||||
extern int csr1212_associate_keyval(struct csr1212_keyval *kv,
|
||||
struct csr1212_keyval *associate);
|
||||
extern void csr1212_disassociate_keyval(struct csr1212_keyval *kv);
|
||||
* keyval that it is associated with.
|
||||
* Take care with subsequent ROM modifications: There is no function to remove
|
||||
* previously specified associations.
|
||||
*/
|
||||
extern void csr1212_associate_keyval(struct csr1212_keyval *kv,
|
||||
struct csr1212_keyval *associate);
|
||||
|
||||
|
||||
/* The following functions manage the association of a keyval and directories.
|
||||
|
@ -609,23 +296,15 @@ extern void csr1212_detach_keyval_from_directory(struct csr1212_keyval *dir,
|
|||
struct csr1212_keyval *kv);
|
||||
|
||||
|
||||
/* The following functions create a Configuration ROM image from the tree of
|
||||
* keyvals provided. csr1212_generate_csr_image() creates a complete image in
|
||||
* the list of caches available via csr->cache_head. The other functions are
|
||||
* provided should there be a need to create a flat image without restrictions
|
||||
* placed by IEEE 1212. */
|
||||
extern struct csr1212_keyval *csr1212_generate_positions(struct csr1212_csr_rom_cache *cache,
|
||||
struct csr1212_keyval *start_kv,
|
||||
int start_pos);
|
||||
extern size_t csr1212_generate_layout_order(struct csr1212_keyval *kv);
|
||||
extern void csr1212_fill_cache(struct csr1212_csr_rom_cache *cache);
|
||||
/* Creates a complete Configuration ROM image in the list of caches available
|
||||
* via csr->cache_head. */
|
||||
extern int csr1212_generate_csr_image(struct csr1212_csr *csr);
|
||||
|
||||
|
||||
/* This is a convenience function for reading a block of data out of one of the
|
||||
* caches in the csr->cache_head list. */
|
||||
extern int csr1212_read(struct csr1212_csr *csr, u_int32_t offset, void *buffer,
|
||||
u_int32_t len);
|
||||
extern int csr1212_read(struct csr1212_csr *csr, u32 offset, void *buffer,
|
||||
u32 len);
|
||||
|
||||
|
||||
/* The following functions are in place for parsing Configuration ROM images.
|
||||
|
@ -635,15 +314,11 @@ extern int csr1212_parse_keyval(struct csr1212_keyval *kv,
|
|||
struct csr1212_csr_rom_cache *cache);
|
||||
extern int csr1212_parse_csr(struct csr1212_csr *csr);
|
||||
|
||||
/* These are internal functions referenced by inline functions below. */
|
||||
extern int _csr1212_read_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv);
|
||||
extern void _csr1212_destroy_keyval(struct csr1212_keyval *kv);
|
||||
|
||||
|
||||
/* This function allocates a new cache which may be used for either parsing or
|
||||
* generating sub-sets of Configuration ROM images. */
|
||||
static inline struct csr1212_csr_rom_cache *csr1212_rom_cache_malloc(u_int32_t offset,
|
||||
size_t size)
|
||||
static inline struct csr1212_csr_rom_cache *
|
||||
csr1212_rom_cache_malloc(u32 offset, size_t size)
|
||||
{
|
||||
struct csr1212_csr_rom_cache *cache;
|
||||
|
||||
|
@ -667,16 +342,8 @@ static inline struct csr1212_csr_rom_cache *csr1212_rom_cache_malloc(u_int32_t o
|
|||
|
||||
/* This function ensures that a keyval contains data when referencing a keyval
|
||||
* created by parsing a Configuration ROM. */
|
||||
static inline struct csr1212_keyval *csr1212_get_keyval(struct csr1212_csr *csr,
|
||||
struct csr1212_keyval *kv)
|
||||
{
|
||||
if (!kv)
|
||||
return NULL;
|
||||
if (!kv->valid)
|
||||
if (_csr1212_read_keyval(csr, kv) != CSR1212_SUCCESS)
|
||||
return NULL;
|
||||
return kv;
|
||||
}
|
||||
extern struct csr1212_keyval *
|
||||
csr1212_get_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv);
|
||||
|
||||
|
||||
/* This function increments the reference count for a keyval should there be a
|
||||
|
@ -691,37 +358,29 @@ static inline void csr1212_keep_keyval(struct csr1212_keyval *kv)
|
|||
* keyval when there are no more users of the keyval. This should be called by
|
||||
* any code that calls csr1212_keep_keyval() or any of the keyval creation
|
||||
* routines csr1212_new_*(). */
|
||||
static inline void csr1212_release_keyval(struct csr1212_keyval *kv)
|
||||
{
|
||||
if (kv->refcnt > 1)
|
||||
kv->refcnt--;
|
||||
else
|
||||
_csr1212_destroy_keyval(kv);
|
||||
}
|
||||
extern void csr1212_release_keyval(struct csr1212_keyval *kv);
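csr1212_release_keyval() is now out of line, but it still pairs with csr1212_keep_keyval() for reference counting as described above. A small illustrative sketch; the example structure and functions are hypothetical, only the two csr1212 calls are real:

#include "csr1212.h"

struct example_unit {
        struct csr1212_keyval *ud;      /* unit directory we keep a reference on */
};

static void example_bind(struct example_unit *e, struct csr1212_keyval *ud)
{
        csr1212_keep_keyval(ud);        /* take our own reference */
        e->ud = ud;
}

static void example_unbind(struct example_unit *e)
{
        csr1212_release_keyval(e->ud);  /* drop it when no longer needed */
        e->ud = NULL;
}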
|
||||
|
||||
|
||||
/*
|
||||
* This macro allows for looping over the keyval entries in a directory and it
|
||||
* ensures that keyvals from remote ConfigROMs are parsed properly.
|
||||
*
|
||||
* _csr is a struct csr1212_csr * that points to CSR associated with dir.
|
||||
* _kv is a struct csr1212_keyval * that'll point to the current keyval (loop index).
|
||||
* _dir is a struct csr1212_keyval * that points to the directory to be looped.
|
||||
* _pos is a struct csr1212_dentry * that is used internally for indexing.
|
||||
* struct csr1212_csr *_csr points to the CSR associated with dir.
|
||||
* struct csr1212_keyval *_kv points to the current keyval (loop index).
|
||||
* struct csr1212_keyval *_dir points to the directory to be looped.
|
||||
* struct csr1212_dentry *_pos is used internally for indexing.
|
||||
*
|
||||
* kv will be NULL upon exit of the loop.
|
||||
*/
|
||||
#define csr1212_for_each_dir_entry(_csr, _kv, _dir, _pos) \
|
||||
for (csr1212_get_keyval((_csr), (_dir)), \
|
||||
_pos = (_dir)->value.directory.dentries_head, \
|
||||
_kv = (_pos) ? csr1212_get_keyval((_csr), _pos->kv) : NULL; \
|
||||
(_kv) && (_pos); \
|
||||
(_kv->associate == NULL) ? \
|
||||
((_pos = _pos->next), \
|
||||
(_kv = (_pos) ? csr1212_get_keyval((_csr), _pos->kv) : \
|
||||
NULL)) : \
|
||||
#define csr1212_for_each_dir_entry(_csr, _kv, _dir, _pos) \
|
||||
for (csr1212_get_keyval((_csr), (_dir)), \
|
||||
_pos = (_dir)->value.directory.dentries_head, \
|
||||
_kv = (_pos) ? csr1212_get_keyval((_csr), _pos->kv) : NULL;\
|
||||
(_kv) && (_pos); \
|
||||
(_kv->associate == NULL) ? \
|
||||
((_pos = _pos->next), (_kv = (_pos) ? \
|
||||
csr1212_get_keyval((_csr), _pos->kv) : \
|
||||
NULL)) : \
|
||||
(_kv = csr1212_get_keyval((_csr), _kv->associate)))
|
||||
|
||||
|
||||
|
||||
#endif /* __CSR1212_H__ */
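As a usage illustration for the csr1212_for_each_dir_entry() macro documented above, the sketch below walks a parsed Config ROM's root directory and prints immediate Vendor entries. The key type/ID constants are assumed to come from csr1212.h; nothing in this sketch is taken from the patch:

#include <linux/kernel.h>
#include "csr1212.h"

static void print_vendor_entries(struct csr1212_csr *csr)
{
        struct csr1212_keyval *kv;
        struct csr1212_dentry *dentry;

        csr1212_for_each_dir_entry(csr, kv, csr->root_kv, dentry) {
                if (kv->key.type == CSR1212_KV_TYPE_IMMEDIATE &&
                    kv->key.id == CSR1212_KV_ID_VENDOR)
                        printk(KERN_INFO "vendor id: 0x%06x\n",
                               kv->value.immediate);
        }
        /* kv is NULL here, as the macro's comment above notes */
}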
|
||||
|
|
|
@@ -62,6 +62,9 @@ void dma_prog_region_free(struct dma_prog_region *prog)

/* dma_region */

/**
 * dma_region_init - clear out all fields but do not allocate anything
 */
void dma_region_init(struct dma_region *dma)
{
        dma->kvirt = NULL;

@@ -71,6 +74,9 @@ void dma_region_init(struct dma_region *dma)
        dma->sglist = NULL;
}

/**
 * dma_region_alloc - allocate the buffer and map it to the IOMMU
 */
int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes,
                     struct pci_dev *dev, int direction)
{

@@ -128,6 +134,9 @@ int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes,
        return -ENOMEM;
}

/**
 * dma_region_free - unmap and free the buffer
 */
void dma_region_free(struct dma_region *dma)
{
        if (dma->n_dma_pages) {

@@ -167,6 +176,12 @@ static inline int dma_region_find(struct dma_region *dma, unsigned long offset,
        return i;
}

/**
 * dma_region_offset_to_bus - get bus address of an offset within a DMA region
 *
 * Returns the DMA bus address of the byte with the given @offset relative to
 * the beginning of the @dma.
 */
dma_addr_t dma_region_offset_to_bus(struct dma_region * dma,
                                    unsigned long offset)
{

@@ -177,6 +192,9 @@ dma_addr_t dma_region_offset_to_bus(struct dma_region * dma,
        return sg_dma_address(sg) + rem;
}

/**
 * dma_region_sync_for_cpu - sync the CPU's view of the buffer
 */
void dma_region_sync_for_cpu(struct dma_region *dma, unsigned long offset,
                             unsigned long len)
{

@@ -193,6 +211,9 @@ void dma_region_sync_for_cpu(struct dma_region *dma, unsigned long offset,
                        dma->direction);
}

/**
 * dma_region_sync_for_device - sync the IO bus' view of the buffer
 */
void dma_region_sync_for_device(struct dma_region *dma, unsigned long offset,
                                unsigned long len)
{

@@ -244,6 +265,9 @@ static struct vm_operations_struct dma_region_vm_ops = {
        .nopage = dma_region_pagefault,
};

/**
 * dma_region_mmap - map the buffer into a user space process
 */
int dma_region_mmap(struct dma_region *dma, struct file *file,
                    struct vm_area_struct *vma)
{
@@ -66,35 +66,23 @@
        int direction;
};

/* clear out all fields but do not allocate anything */
void dma_region_init(struct dma_region *dma);

/* allocate the buffer and map it to the IOMMU */
int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes,
                     struct pci_dev *dev, int direction);

/* unmap and free the buffer */
void dma_region_free(struct dma_region *dma);

/* sync the CPU's view of the buffer */
void dma_region_sync_for_cpu(struct dma_region *dma, unsigned long offset,
                             unsigned long len);

/* sync the IO bus' view of the buffer */
void dma_region_sync_for_device(struct dma_region *dma, unsigned long offset,
                                unsigned long len);

/* map the buffer into a user space process */
int dma_region_mmap(struct dma_region *dma, struct file *file,
                    struct vm_area_struct *vma);

/* macro to index into a DMA region (or dma_prog_region) */
#define dma_region_i(_dma, _type, _index) \
        ( ((_type*) ((_dma)->kvirt)) + (_index) )

/* return the DMA bus address of the byte with the given offset
 * relative to the beginning of the dma_region */
dma_addr_t dma_region_offset_to_bus(struct dma_region *dma,
                                    unsigned long offset);

/**
 * dma_region_i - macro to index into a DMA region (or dma_prog_region)
 */
#define dma_region_i(_dma, _type, _index) \
        ( ((_type*) ((_dma)->kvirt)) + (_index) )

#endif /* IEEE1394_DMA_H */
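A minimal lifecycle sketch for the dma_region helpers declared above; pdev is assumed to be an already-enabled PCI device, and error handling is abbreviated:

#include <linux/pci.h>
#include <linux/errno.h>
#include <asm/page.h>
#include "dma.h"

static int dma_region_demo(struct pci_dev *pdev)
{
        struct dma_region region;
        dma_addr_t bus;

        dma_region_init(&region);               /* clear all fields */
        if (dma_region_alloc(&region, 4 * PAGE_SIZE, pdev,
                             PCI_DMA_BIDIRECTIONAL))
                return -ENOMEM;

        /* bus address of byte 128, e.g. to hand to a DMA program */
        bus = dma_region_offset_to_bus(&region, 128);
        (void)bus;

        dma_region_sync_for_device(&region, 0, 4 * PAGE_SIZE);
        /* ... device works on the buffer ... */
        dma_region_sync_for_cpu(&region, 0, 4 * PAGE_SIZE);

        dma_region_free(&region);
        return 0;
}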
File diff suppressed because it is too large
@@ -25,8 +25,11 @@
#define __ETH1394_H

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <asm/byteorder.h>

#include "ieee1394.h"
#include "ieee1394_types.h"

/* Register for incoming packets. This is 4096 bytes, which supports up to
 * S3200 (per Table 16-3 of IEEE 1394b-2002). */

@@ -34,22 +37,15 @@

/* GASP identifier numbers for IPv4 over IEEE 1394 */
#define ETHER1394_GASP_SPECIFIER_ID 0x00005E
#define ETHER1394_GASP_SPECIFIER_ID_HI ((ETHER1394_GASP_SPECIFIER_ID >> 8) & 0xffff)
#define ETHER1394_GASP_SPECIFIER_ID_LO (ETHER1394_GASP_SPECIFIER_ID & 0xff)
#define ETHER1394_GASP_SPECIFIER_ID_HI ((0x00005E >> 8) & 0xffff)
#define ETHER1394_GASP_SPECIFIER_ID_LO (0x00005E & 0xff)
#define ETHER1394_GASP_VERSION 1

#define ETHER1394_GASP_OVERHEAD (2 * sizeof(quadlet_t)) /* GASP header overhead */
#define ETHER1394_GASP_OVERHEAD (2 * sizeof(quadlet_t)) /* for GASP header */

#define ETHER1394_GASP_BUFFERS 16
#define ETHER1394_GASP_BUFFERS 16

/* rawiso buffer size - due to a limitation in rawiso, we must limit each
 * GASP buffer to be less than PAGE_SIZE. */
#define ETHER1394_ISO_BUF_SIZE ETHER1394_GASP_BUFFERS * \
                min((unsigned int)PAGE_SIZE, \
                    2 * (1U << (priv->host->csr.max_rec + 1)))

/* Node set == 64 */
#define NODE_SET (ALL_NODES + 1)
#define NODE_SET (ALL_NODES + 1) /* Node set == 64 */

enum eth1394_bc_states { ETHER1394_BC_ERROR,
                         ETHER1394_BC_RUNNING,

@@ -85,19 +81,14 @@ struct eth1394hdr {
        unsigned short h_proto; /* packet type ID field */
} __attribute__((packed));

#ifdef __KERNEL__
#include <linux/skbuff.h>

static inline struct eth1394hdr *eth1394_hdr(const struct sk_buff *skb)
{
        return (struct eth1394hdr *)skb_mac_header(skb);
}
#endif

typedef enum {ETH1394_GASP, ETH1394_WRREQ} eth1394_tx_type;

/* IP1394 headers */
#include <asm/byteorder.h>

/* Unfragmented */
#if defined __BIG_ENDIAN_BITFIELD
@ -70,8 +70,12 @@ static struct hl_host_info *hl_get_hostinfo(struct hpsb_highlevel *hl,
|
|||
return NULL;
|
||||
}
|
||||
|
||||
/* Returns a per host/driver data structure that was previously stored by
|
||||
* hpsb_create_hostinfo. */
|
||||
/**
|
||||
* hpsb_get_hostinfo - retrieve a hostinfo pointer bound to this driver/host
|
||||
*
|
||||
* Returns a per @host and @hl driver data structure that was previously stored
|
||||
* by hpsb_create_hostinfo.
|
||||
*/
|
||||
void *hpsb_get_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host)
|
||||
{
|
||||
struct hl_host_info *hi = hl_get_hostinfo(hl, host);
|
||||
|
@ -79,7 +83,13 @@ void *hpsb_get_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host)
|
|||
return hi ? hi->data : NULL;
|
||||
}
|
||||
|
||||
/* If size is zero, then the return here is only valid for error checking */
|
||||
/**
|
||||
* hpsb_create_hostinfo - allocate a hostinfo pointer bound to this driver/host
|
||||
*
|
||||
* Allocate a hostinfo pointer backed by memory with @data_size and bind it to
|
||||
* to this @hl driver and @host. If @data_size is zero, then the return here is
|
||||
* only valid for error checking.
|
||||
*/
|
||||
void *hpsb_create_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host,
|
||||
size_t data_size)
|
||||
{
|
||||
|
@ -113,6 +123,11 @@ void *hpsb_create_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host,
|
|||
return data;
|
||||
}
|
||||
|
||||
/**
|
||||
* hpsb_set_hostinfo - set the hostinfo pointer to something useful
|
||||
*
|
||||
* Usually follows a call to hpsb_create_hostinfo, where the size is 0.
|
||||
*/
|
||||
int hpsb_set_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host,
|
||||
void *data)
|
||||
{
|
||||
|
@ -132,6 +147,11 @@ int hpsb_set_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host,
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
/**
|
||||
* hpsb_destroy_hostinfo - free and remove a hostinfo pointer
|
||||
*
|
||||
* Free and remove the hostinfo pointer bound to this @hl driver and @host.
|
||||
*/
|
||||
void hpsb_destroy_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host)
|
||||
{
|
||||
struct hl_host_info *hi;
|
||||
|
@ -147,6 +167,12 @@ void hpsb_destroy_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host)
|
|||
return;
|
||||
}
|
||||
|
||||
/**
|
||||
* hpsb_set_hostinfo_key - set an alternate lookup key for a hostinfo
|
||||
*
|
||||
* Sets an alternate lookup key for the hostinfo bound to this @hl driver and
|
||||
* @host.
|
||||
*/
|
||||
void hpsb_set_hostinfo_key(struct hpsb_highlevel *hl, struct hpsb_host *host,
|
||||
unsigned long key)
|
||||
{
|
||||
|
@ -158,6 +184,9 @@ void hpsb_set_hostinfo_key(struct hpsb_highlevel *hl, struct hpsb_host *host,
|
|||
return;
|
||||
}
|
||||
|
||||
/**
|
||||
* hpsb_get_hostinfo_bykey - retrieve a hostinfo pointer by its alternate key
|
||||
*/
|
||||
void *hpsb_get_hostinfo_bykey(struct hpsb_highlevel *hl, unsigned long key)
|
||||
{
|
||||
struct hl_host_info *hi;
|
||||
|
@ -189,6 +218,12 @@ static int highlevel_for_each_host_reg(struct hpsb_host *host, void *__data)
|
|||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* hpsb_register_highlevel - register highlevel driver
|
||||
*
|
||||
* The name pointer in @hl has to stay valid at all times because the string is
|
||||
* not copied.
|
||||
*/
|
||||
void hpsb_register_highlevel(struct hpsb_highlevel *hl)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
@ -258,6 +293,9 @@ static int highlevel_for_each_host_unreg(struct hpsb_host *host, void *__data)
|
|||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* hpsb_unregister_highlevel - unregister highlevel driver
|
||||
*/
|
||||
void hpsb_unregister_highlevel(struct hpsb_highlevel *hl)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
@ -273,6 +311,19 @@ void hpsb_unregister_highlevel(struct hpsb_highlevel *hl)
|
|||
nodemgr_for_each_host(hl, highlevel_for_each_host_unreg);
|
||||
}
|
||||
|
||||
/**
|
||||
* hpsb_allocate_and_register_addrspace - alloc' and reg' a host address space
|
||||
*
|
||||
* @start and @end are 48 bit pointers and have to be quadlet aligned.
|
||||
* @end points to the first address behind the handled addresses. This
|
||||
* function can be called multiple times for a single hpsb_highlevel @hl to
|
||||
* implement sparse register sets. The requested region must not overlap any
|
||||
* previously allocated region, otherwise registering will fail.
|
||||
*
|
||||
* It returns true for successful allocation. Address spaces can be
|
||||
* unregistered with hpsb_unregister_addrspace. All remaining address spaces
|
||||
* are automatically deallocated together with the hpsb_highlevel @hl.
|
||||
*/
|
||||
u64 hpsb_allocate_and_register_addrspace(struct hpsb_highlevel *hl,
|
||||
struct hpsb_host *host,
|
||||
struct hpsb_address_ops *ops,
|
||||
|
@ -348,6 +399,19 @@ u64 hpsb_allocate_and_register_addrspace(struct hpsb_highlevel *hl,
|
|||
return retval;
|
||||
}
|
||||
|
||||
/**
|
||||
* hpsb_register_addrspace - register a host address space
|
||||
*
|
||||
* @start and @end are 48 bit pointers and have to be quadlet aligned.
|
||||
* @end points to the first address behind the handled addresses. This
|
||||
* function can be called multiple times for a single hpsb_highlevel @hl to
|
||||
* implement sparse register sets. The requested region must not overlap any
|
||||
* previously allocated region, otherwise registering will fail.
|
||||
*
|
||||
* It returns true for successful allocation. Address spaces can be
|
||||
* unregistered with hpsb_unregister_addrspace. All remaining address spaces
|
||||
* are automatically deallocated together with the hpsb_highlevel @hl.
|
||||
*/
|
||||
int hpsb_register_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host,
|
||||
struct hpsb_address_ops *ops, u64 start, u64 end)
|
||||
{
|
||||
|
@ -419,6 +483,11 @@ int hpsb_unregister_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host,
|
|||
return retval;
|
||||
}
|
||||
|
||||
/**
|
||||
* hpsb_listen_channel - enable receiving a certain isochronous channel
|
||||
*
|
||||
* Reception is handled through the @hl's iso_receive op.
|
||||
*/
|
||||
int hpsb_listen_channel(struct hpsb_highlevel *hl, struct hpsb_host *host,
|
||||
unsigned int channel)
|
||||
{
|
||||
|
@ -431,6 +500,9 @@ int hpsb_listen_channel(struct hpsb_highlevel *hl, struct hpsb_host *host,
|
|||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* hpsb_unlisten_channel - disable receiving a certain isochronous channel
|
||||
*/
|
||||
void hpsb_unlisten_channel(struct hpsb_highlevel *hl, struct hpsb_host *host,
|
||||
unsigned int channel)
|
||||
{
|
||||
|
@ -528,6 +600,17 @@ void highlevel_fcp_request(struct hpsb_host *host, int nodeid, int direction,
|
|||
read_unlock_irqrestore(&hl_irqs_lock, flags);
|
||||
}
|
||||
|
||||
/*
|
||||
* highlevel_read, highlevel_write, highlevel_lock, highlevel_lock64:
|
||||
*
|
||||
* These functions are called to handle transactions. They are called when a
|
||||
* packet arrives. The flags argument contains the second word of the first
|
||||
* header quadlet of the incoming packet (containing transaction label, retry
|
||||
* code, transaction code and priority). These functions either return a
|
||||
* response code or a negative number. In the first case a response will be
|
||||
* generated. In the latter case, no response will be sent and the driver which
|
||||
* handled the request will send the response itself.
|
||||
*/
|
||||
int highlevel_read(struct hpsb_host *host, int nodeid, void *data, u64 addr,
|
||||
unsigned int length, u16 flags)
|
||||
{
|
||||
|
|
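The comment block above describes how the read/write handlers signal their result: a non-negative response code makes the core generate the response, a negative value suppresses it. As a hedged illustration (the exact hpsb_address_ops read signature is assumed from highlevel.h and is not shown in this hunk), a trivial handler serving one quadlet:

#include <asm/byteorder.h>
#include "highlevel.h"
#include "ieee1394.h"           /* RCODE_* response codes */

static quadlet_t example_register;

static int example_read(struct hpsb_host *host, int nodeid, quadlet_t *buffer,
                        u64 addr, size_t length, u16 flags)
{
        if (length != sizeof(quadlet_t))
                return RCODE_TYPE_ERROR;        /* core sends the error response */

        *buffer = cpu_to_be32(example_register);
        return RCODE_COMPLETE;                  /* core sends the read response */
}

static struct hpsb_address_ops example_ops = {
        .read = example_read,
};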
|
@@ -99,16 +99,6 @@ struct hpsb_address_ops {
void highlevel_add_host(struct hpsb_host *host);
void highlevel_remove_host(struct hpsb_host *host);
void highlevel_host_reset(struct hpsb_host *host);

/*
 * These functions are called to handle transactions. They are called when a
 * packet arrives. The flags argument contains the second word of the first
 * header quadlet of the incoming packet (containing transaction label, retry
 * code, transaction code and priority). These functions either return a
 * response code or a negative number. In the first case a response will be
 * generated. In the latter case, no response will be sent and the driver which
 * handled the request will send the response itself.
 */
int highlevel_read(struct hpsb_host *host, int nodeid, void *data, u64 addr,
                   unsigned int length, u16 flags);
int highlevel_write(struct hpsb_host *host, int nodeid, int destid, void *data,

@@ -119,30 +109,13 @@ int highlevel_lock(struct hpsb_host *host, int nodeid, quadlet_t *store,
int highlevel_lock64(struct hpsb_host *host, int nodeid, octlet_t *store,
                     u64 addr, octlet_t data, octlet_t arg, int ext_tcode,
                     u16 flags);

void highlevel_iso_receive(struct hpsb_host *host, void *data, size_t length);
void highlevel_fcp_request(struct hpsb_host *host, int nodeid, int direction,
                           void *data, size_t length);

/*
 * Register highlevel driver. The name pointer has to stay valid at all times
 * because the string is not copied.
 */
void hpsb_register_highlevel(struct hpsb_highlevel *hl);
void hpsb_unregister_highlevel(struct hpsb_highlevel *hl);

/*
 * Register handlers for host address spaces. Start and end are 48 bit pointers
 * and have to be quadlet aligned. Argument "end" points to the first address
 * behind the handled addresses. This function can be called multiple times for
 * a single hpsb_highlevel to implement sparse register sets. The requested
 * region must not overlap any previously allocated region, otherwise
 * registering will fail.
 *
 * It returns true for successful allocation. Address spaces can be
 * unregistered with hpsb_unregister_addrspace. All remaining address spaces
 * are automatically deallocated together with the hpsb_highlevel.
 */
u64 hpsb_allocate_and_register_addrspace(struct hpsb_highlevel *hl,
                                         struct hpsb_host *host,
                                         struct hpsb_address_ops *ops,

@@ -152,45 +125,19 @@ int hpsb_register_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host,
                            struct hpsb_address_ops *ops, u64 start, u64 end);
int hpsb_unregister_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host,
                              u64 start);

/*
 * Enable or disable receiving a certain isochronous channel through the
 * iso_receive op.
 */
int hpsb_listen_channel(struct hpsb_highlevel *hl, struct hpsb_host *host,
                        unsigned int channel);
                        unsigned int channel);
void hpsb_unlisten_channel(struct hpsb_highlevel *hl, struct hpsb_host *host,
                           unsigned int channel);

/* Retrieve a hostinfo pointer bound to this driver/host */
void *hpsb_get_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host);

/* Allocate a hostinfo pointer of data_size bound to this driver/host */
void *hpsb_create_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host,
                           size_t data_size);

/* Free and remove the hostinfo pointer bound to this driver/host */
void hpsb_destroy_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host);

/* Set an alternate lookup key for the hostinfo bound to this driver/host */
void hpsb_set_hostinfo_key(struct hpsb_highlevel *hl, struct hpsb_host *host,
                           unsigned long key);

/* Retrieve the alternate lookup key for the hostinfo bound to this
 * driver/host */
unsigned long hpsb_get_hostinfo_key(struct hpsb_highlevel *hl,
                                    struct hpsb_host *host);

/* Retrieve a hostinfo pointer bound to this driver using its alternate key */
void *hpsb_get_hostinfo_bykey(struct hpsb_highlevel *hl, unsigned long key);

/* Set the hostinfo pointer to something useful. Usually follows a call to
 * hpsb_create_hostinfo, where the size is 0. */
int hpsb_set_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host,
                      void *data);

/* Retrieve hpsb_host using a highlevel handle and a key */
struct hpsb_host *hpsb_get_host_bykey(struct hpsb_highlevel *hl,
                                      unsigned long key);

#endif /* IEEE1394_HIGHLEVEL_H */

@@ -94,14 +94,6 @@ static int alloc_hostnum_cb(struct hpsb_host *host, void *__data)
    return 0;
}

/*
 * The pending_packet_queue is special in that it's processed
 * from hardirq context too (such as hpsb_bus_reset()). Hence
 * split the lock class from the usual networking skb-head
 * lock class by using a separate key for it:
 */
static struct lock_class_key pending_packet_queue_key;

static DEFINE_MUTEX(host_num_alloc);

/**

@@ -137,9 +129,7 @@ struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra,
    h->hostdata = h + 1;
    h->driver = drv;

    skb_queue_head_init(&h->pending_packet_queue);
    lockdep_set_class(&h->pending_packet_queue.lock,
                      &pending_packet_queue_key);
    INIT_LIST_HEAD(&h->pending_packets);
    INIT_LIST_HEAD(&h->addr_space);

    for (i = 2; i < 16; i++)

@@ -190,7 +180,7 @@ int hpsb_add_host(struct hpsb_host *host)
{
    if (hpsb_default_host_entry(host))
        return -ENOMEM;
    hpsb_add_extra_config_roms(host);

    highlevel_add_host(host);
    return 0;
}

@@ -212,12 +202,19 @@ void hpsb_remove_host(struct hpsb_host *host)

    host->driver = &dummy_driver;
    highlevel_remove_host(host);
    hpsb_remove_extra_config_roms(host);

    class_device_unregister(&host->class_dev);
    device_unregister(&host->device);
}

/**
 * hpsb_update_config_rom_image - updates configuration ROM image of a host
 *
 * Updates the configuration ROM image of a host. rom_version must be the
 * current version, otherwise it will fail with return value -1. If this
 * host does not support config-rom-update, it will return -%EINVAL.
 * Return value 0 indicates success.
 */
int hpsb_update_config_rom_image(struct hpsb_host *host)
{
    unsigned long reset_delay;

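A minimal sketch, not from this patch, of how a caller might react to the return values documented for hpsb_update_config_rom_image() above; the helper name is assumed.

/* Hypothetical example: push a modified config ROM image to the controller
 * and report the outcome using the return codes described in the kerneldoc. */
static int example_commit_config_rom(struct hpsb_host *host)
{
    int ret = hpsb_update_config_rom_image(host);

    if (ret == -EINVAL)
        HPSB_ERR("host driver cannot update its config ROM");
    else if (ret < 0)
        HPSB_ERR("config ROM image was not current, not updated");
    return ret;	/* 0 on success */
}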
@@ -3,7 +3,6 @@

#include <linux/device.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/workqueue.h>

@@ -25,8 +24,7 @@ struct hpsb_host {

    atomic_t generation;

    struct sk_buff_head pending_packet_queue;

    struct list_head pending_packets;
    struct timer_list timeout;
    unsigned long timeout_interval;

@@ -202,12 +200,6 @@ struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra,
int hpsb_add_host(struct hpsb_host *host);
void hpsb_resume_host(struct hpsb_host *host);
void hpsb_remove_host(struct hpsb_host *host);

/* Updates the configuration rom image of a host. rom_version must be the
 * current version, otherwise it will fail with return value -1. If this
 * host does not support config-rom-update, it will return -EINVAL.
 * Return value 0 indicates success.
 */
int hpsb_update_config_rom_image(struct hpsb_host *host);

#endif /* _IEEE1394_HOSTS_H */

@@ -30,7 +30,6 @@
#include <linux/moduleparam.h>
#include <linux/bitops.h>
#include <linux/kdev_t.h>
#include <linux/skbuff.h>
#include <linux/suspend.h>
#include <linux/kthread.h>
#include <linux/preempt.h>

@@ -96,13 +95,15 @@ static void queue_packet_complete(struct hpsb_packet *packet);

/**
 * hpsb_set_packet_complete_task - set the task that runs when a packet
 * completes. You cannot call this more than once on a single packet
 * before it is sent.
 *
 * hpsb_set_packet_complete_task - set task that runs when a packet completes
 * @packet: the packet whose completion we want the task added to
 * @routine: function to call
 * @data: data (if any) to pass to the above function
 *
 * Set the task that runs when a packet completes. You cannot call this more
 * than once on a single packet before it is sent.
 *
 * Typically, the complete @routine is responsible to call hpsb_free_packet().
 */
void hpsb_set_packet_complete_task(struct hpsb_packet *packet,
                                   void (*routine)(void *), void *data)

@@ -115,12 +116,12 @@ void hpsb_set_packet_complete_task(struct hpsb_packet *packet,

/**
 * hpsb_alloc_packet - allocate new packet structure
 * @data_size: size of the data block to be allocated
 * @data_size: size of the data block to be allocated, in bytes
 *
 * This function allocates, initializes and returns a new &struct hpsb_packet.
 * It can be used in interrupt context. A header block is always included, its
 * size is big enough to contain all possible 1394 headers. The data block is
 * only allocated when @data_size is not zero.
 * It can be used in interrupt context. A header block is always included and
 * initialized with zeros. Its size is big enough to contain all possible 1394
 * headers. The data block is only allocated if @data_size is not zero.
 *
 * For packets for which responses will be received the @data_size has to be big
 * enough to contain the response's data block since no further allocation

@@ -135,50 +136,49 @@ void hpsb_set_packet_complete_task(struct hpsb_packet *packet,
 */
struct hpsb_packet *hpsb_alloc_packet(size_t data_size)
{
    struct hpsb_packet *packet = NULL;
    struct sk_buff *skb;
    struct hpsb_packet *packet;

    data_size = ((data_size + 3) & ~3);

    skb = alloc_skb(data_size + sizeof(*packet), GFP_ATOMIC);
    if (skb == NULL)
    packet = kzalloc(sizeof(*packet) + data_size, GFP_ATOMIC);
    if (!packet)
        return NULL;

    memset(skb->data, 0, data_size + sizeof(*packet));

    packet = (struct hpsb_packet *)skb->data;
    packet->skb = skb;

    packet->header = packet->embedded_header;
    packet->state = hpsb_unused;
    packet->generation = -1;
    INIT_LIST_HEAD(&packet->driver_list);
    INIT_LIST_HEAD(&packet->queue);
    atomic_set(&packet->refcnt, 1);

    if (data_size) {
        packet->data = (quadlet_t *)(skb->data + sizeof(*packet));
        packet->data_size = data_size;
        packet->data = packet->embedded_data;
        packet->allocated_data_size = data_size;
    }

    return packet;
}

/**
 * hpsb_free_packet - free packet and data associated with it
 * @packet: packet to free (is NULL safe)
 *
 * This function will free packet->data and finally the packet itself.
 * Frees @packet->data only if it was allocated through hpsb_alloc_packet().
 */
void hpsb_free_packet(struct hpsb_packet *packet)
{
    if (packet && atomic_dec_and_test(&packet->refcnt)) {
        BUG_ON(!list_empty(&packet->driver_list));
        kfree_skb(packet->skb);
        BUG_ON(!list_empty(&packet->driver_list) ||
               !list_empty(&packet->queue));
        kfree(packet);
    }
}

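A short sketch, not part of this patch, tying together the three calls documented above (allocate, attach a completion routine, free); following the kerneldoc note, the completion routine itself frees the packet.

/* Hypothetical example of the packet lifecycle. */
static void example_complete(void *data)
{
    struct hpsb_packet *packet = data;

    /* ... inspect packet->ack_code / packet->data here ... */
    hpsb_free_packet(packet);	/* the complete routine frees the packet */
}

static struct hpsb_packet *example_make_packet(void)
{
    struct hpsb_packet *packet = hpsb_alloc_packet(8);	/* 8-byte data block */

    if (!packet)
        return NULL;
    hpsb_set_packet_complete_task(packet, example_complete, packet);
    return packet;
}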
/**
 * hpsb_reset_bus - initiate bus reset on the given host
 * @host: host controller whose bus to reset
 * @type: one of enum reset_types
 *
 * Returns 1 if bus reset already in progress, 0 otherwise.
 */
int hpsb_reset_bus(struct hpsb_host *host, int type)
{
    if (!host->in_bus_reset) {

@@ -229,6 +229,14 @@ int hpsb_read_cycle_timer(struct hpsb_host *host, u32 *cycle_timer,
    return 0;
}

/**
 * hpsb_bus_reset - notify a bus reset to the core
 *
 * For host driver module usage. Safe to use in interrupt context, although
 * quite complex; so you may want to run it in the bottom rather than top half.
 *
 * Returns 1 if bus reset already in progress, 0 otherwise.
 */
int hpsb_bus_reset(struct hpsb_host *host)
{
    if (host->in_bus_reset) {

@@ -405,6 +413,14 @@ static void build_speed_map(struct hpsb_host *host, int nodecount)
}

/**
 * hpsb_selfid_received - hand over received selfid packet to the core
 *
 * For host driver module usage. Safe to use in interrupt context.
 *
 * The host driver should have done a successful complement check (second
 * quadlet is complement of first) beforehand.
 */
void hpsb_selfid_received(struct hpsb_host *host, quadlet_t sid)
{
    if (host->in_bus_reset) {

@@ -416,6 +432,15 @@ void hpsb_selfid_received(struct hpsb_host *host, quadlet_t sid)
    }
}

/**
 * hpsb_selfid_complete - notify completion of SelfID stage to the core
 *
 * For host driver module usage. Safe to use in interrupt context, although
 * quite complex; so you may want to run it in the bottom rather than top half.
 *
 * Notify completion of SelfID stage to the core and report new physical ID
 * and whether host is root now.
 */
void hpsb_selfid_complete(struct hpsb_host *host, int phyid, int isroot)
{
    if (!host->in_bus_reset)

@@ -462,30 +487,41 @@ void hpsb_selfid_complete(struct hpsb_host *host, int phyid, int isroot)
    highlevel_host_reset(host);
}

static spinlock_t pending_packets_lock = SPIN_LOCK_UNLOCKED;

/**
 * hpsb_packet_sent - notify core of sending a packet
 *
 * For host driver module usage. Safe to call from within a transmit packet
 * routine.
 *
 * Notify core of sending a packet. Ackcode is the ack code returned for async
 * transmits or ACKX_SEND_ERROR if the transmission failed completely; ACKX_NONE
 * for other cases (internal errors that don't justify a panic).
 */
void hpsb_packet_sent(struct hpsb_host *host, struct hpsb_packet *packet,
                      int ackcode)
{
    unsigned long flags;

    spin_lock_irqsave(&host->pending_packet_queue.lock, flags);
    spin_lock_irqsave(&pending_packets_lock, flags);

    packet->ack_code = ackcode;

    if (packet->no_waiter || packet->state == hpsb_complete) {
        /* if packet->no_waiter, must not have a tlabel allocated */
        spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags);
        spin_unlock_irqrestore(&pending_packets_lock, flags);
        hpsb_free_packet(packet);
        return;
    }

    atomic_dec(&packet->refcnt); /* drop HC's reference */
    /* here the packet must be on the host->pending_packet_queue */
    /* here the packet must be on the host->pending_packets queue */

    if (ackcode != ACK_PENDING || !packet->expect_response) {
        packet->state = hpsb_complete;
        __skb_unlink(packet->skb, &host->pending_packet_queue);
        spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags);
        list_del_init(&packet->queue);
        spin_unlock_irqrestore(&pending_packets_lock, flags);
        queue_packet_complete(packet);
        return;
    }

@@ -493,7 +529,7 @@ void hpsb_packet_sent(struct hpsb_host *host, struct hpsb_packet *packet,
    packet->state = hpsb_pending;
    packet->sendtime = jiffies;

    spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags);
    spin_unlock_irqrestore(&pending_packets_lock, flags);

    mod_timer(&host->timeout, jiffies + host->timeout_interval);
}

@@ -504,9 +540,10 @@ void hpsb_packet_sent(struct hpsb_host *host, struct hpsb_packet *packet,
 * @rootid: root whose force_root bit should get set (-1 = don't set force_root)
 * @gapcnt: gap count value to set (-1 = don't set gap count)
 *
 * This function sends a PHY config packet on the bus through the specified host.
 * This function sends a PHY config packet on the bus through the specified
 * host.
 *
 * Return value: 0 for success or error number otherwise.
 * Return value: 0 for success or negative error number otherwise.
 */
int hpsb_send_phy_config(struct hpsb_host *host, int rootid, int gapcnt)
{
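A one-line usage sketch, not part of this patch, following the parameter description above (-1 leaves a field unchanged):

/* Hypothetical example: set only the gap count; rootid == -1 means
 * "don't set force_root". */
static int example_set_gap_count(struct hpsb_host *host, int gapcnt)
{
    return hpsb_send_phy_config(host, -1, gapcnt);
}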
@@ -567,12 +604,16 @@ int hpsb_send_packet(struct hpsb_packet *packet)
    WARN_ON(packet->no_waiter && packet->expect_response);

    if (!packet->no_waiter || packet->expect_response) {
        unsigned long flags;

        atomic_inc(&packet->refcnt);
        /* Set the initial "sendtime" to 10 seconds from now, to
           prevent premature expiry. If a packet takes more than
           10 seconds to hit the wire, we have bigger problems :) */
        packet->sendtime = jiffies + 10 * HZ;
        skb_queue_tail(&host->pending_packet_queue, packet->skb);
        spin_lock_irqsave(&pending_packets_lock, flags);
        list_add_tail(&packet->queue, &host->pending_packets);
        spin_unlock_irqrestore(&pending_packets_lock, flags);
    }

    if (packet->node_id == host->node_id) {

@@ -621,6 +662,12 @@ static void complete_packet(void *data)
    complete((struct completion *) data);
}

/**
 * hpsb_send_packet_and_wait - enqueue packet, block until transaction completes
 * @packet: packet to send
 *
 * Return value: 0 on success, negative errno on failure.
 */
int hpsb_send_packet_and_wait(struct hpsb_packet *packet)
{
    struct completion done;
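A sketch of the blocking transmit path, not part of this patch; how the request itself is built (for example via the helpers declared in ieee1394_transactions.h) is deliberately left out.

/* Hypothetical example: send an already prepared request, block until the
 * transaction finishes, then translate the result and release the packet. */
static int example_do_transaction(struct hpsb_packet *packet)
{
    int ret = hpsb_send_packet_and_wait(packet);	/* 0 or negative errno */

    if (ret == 0)
        ret = hpsb_packet_success(packet);	/* map ack/response codes */
    hpsb_free_packet(packet);
    return ret;
}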
@@ -642,86 +689,97 @@ static void send_packet_nocare(struct hpsb_packet *packet)
    }
}

static size_t packet_size_to_data_size(size_t packet_size, size_t header_size,
                                       size_t buffer_size, int tcode)
{
    size_t ret = packet_size <= header_size ? 0 : packet_size - header_size;

    if (unlikely(ret > buffer_size))
        ret = buffer_size;

    if (unlikely(ret + header_size != packet_size))
        HPSB_ERR("unexpected packet size %zd (tcode %d), bug?",
                 packet_size, tcode);
    return ret;
}

static void handle_packet_response(struct hpsb_host *host, int tcode,
                                   quadlet_t *data, size_t size)
{
    struct hpsb_packet *packet = NULL;
    struct sk_buff *skb;
    int tcode_match = 0;
    int tlabel;
    struct hpsb_packet *packet;
    int tlabel = (data[0] >> 10) & 0x3f;
    size_t header_size;
    unsigned long flags;

    tlabel = (data[0] >> 10) & 0x3f;
    spin_lock_irqsave(&pending_packets_lock, flags);

    spin_lock_irqsave(&host->pending_packet_queue.lock, flags);
    list_for_each_entry(packet, &host->pending_packets, queue)
        if (packet->tlabel == tlabel &&
            packet->node_id == (data[1] >> 16))
            goto found;

    skb_queue_walk(&host->pending_packet_queue, skb) {
        packet = (struct hpsb_packet *)skb->data;
        if ((packet->tlabel == tlabel)
            && (packet->node_id == (data[1] >> 16))){
            break;
        }

        packet = NULL;
    }

    if (packet == NULL) {
        HPSB_DEBUG("unsolicited response packet received - no tlabel match");
        dump_packet("contents", data, 16, -1);
        spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags);
        return;
    }
    spin_unlock_irqrestore(&pending_packets_lock, flags);
    HPSB_DEBUG("unsolicited response packet received - %s",
               "no tlabel match");
    dump_packet("contents", data, 16, -1);
    return;

found:
    switch (packet->tcode) {
    case TCODE_WRITEQ:
    case TCODE_WRITEB:
        if (tcode != TCODE_WRITE_RESPONSE)
        if (unlikely(tcode != TCODE_WRITE_RESPONSE))
            break;
        tcode_match = 1;
        memcpy(packet->header, data, 12);
        break;
        header_size = 12;
        size = 0;
        goto dequeue;

    case TCODE_READQ:
        if (tcode != TCODE_READQ_RESPONSE)
        if (unlikely(tcode != TCODE_READQ_RESPONSE))
            break;
        tcode_match = 1;
        memcpy(packet->header, data, 16);
        break;
        header_size = 16;
        size = 0;
        goto dequeue;

    case TCODE_READB:
        if (tcode != TCODE_READB_RESPONSE)
        if (unlikely(tcode != TCODE_READB_RESPONSE))
            break;
        tcode_match = 1;
        BUG_ON(packet->skb->len - sizeof(*packet) < size - 16);
        memcpy(packet->header, data, 16);
        memcpy(packet->data, data + 4, size - 16);
        break;
        header_size = 16;
        size = packet_size_to_data_size(size, header_size,
                                        packet->allocated_data_size,
                                        tcode);
        goto dequeue;

    case TCODE_LOCK_REQUEST:
        if (tcode != TCODE_LOCK_RESPONSE)
        if (unlikely(tcode != TCODE_LOCK_RESPONSE))
            break;
        tcode_match = 1;
        size = min((size - 16), (size_t)8);
        BUG_ON(packet->skb->len - sizeof(*packet) < size);
        memcpy(packet->header, data, 16);
        memcpy(packet->data, data + 4, size);
        break;
        header_size = 16;
        size = packet_size_to_data_size(min(size, (size_t)(16 + 8)),
                                        header_size,
                                        packet->allocated_data_size,
                                        tcode);
        goto dequeue;
    }

    if (!tcode_match) {
        spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags);
        HPSB_INFO("unsolicited response packet received - tcode mismatch");
        dump_packet("contents", data, 16, -1);
        return;
    }
    spin_unlock_irqrestore(&pending_packets_lock, flags);
    HPSB_DEBUG("unsolicited response packet received - %s",
               "tcode mismatch");
    dump_packet("contents", data, 16, -1);
    return;

    __skb_unlink(skb, &host->pending_packet_queue);
dequeue:
    list_del_init(&packet->queue);
    spin_unlock_irqrestore(&pending_packets_lock, flags);

    if (packet->state == hpsb_queued) {
        packet->sendtime = jiffies;
        packet->ack_code = ACK_PENDING;
    }

    packet->state = hpsb_complete;
    spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags);

    memcpy(packet->header, data, header_size);
    if (size)
        memcpy(packet->data, data + 4, size);

    queue_packet_complete(packet);
}
@@ -735,6 +793,7 @@ static struct hpsb_packet *create_reply_packet(struct hpsb_host *host,
    p = hpsb_alloc_packet(dsize);
    if (unlikely(p == NULL)) {
        /* FIXME - send data_error response */
        HPSB_ERR("out of memory, cannot send response packet");
        return NULL;
    }

@@ -784,7 +843,6 @@ static void fill_async_readblock_resp(struct hpsb_packet *packet, int rcode,
static void fill_async_write_resp(struct hpsb_packet *packet, int rcode)
{
    PREP_ASYNC_HEAD_RCODE(TCODE_WRITE_RESPONSE);
    packet->header[2] = 0;
    packet->header_size = 12;
    packet->data_size = 0;
}

@@ -801,12 +859,9 @@ static void fill_async_lock_resp(struct hpsb_packet *packet, int rcode, int extc
    packet->data_size = length;
}

#define PREP_REPLY_PACKET(length) \
    packet = create_reply_packet(host, data, length); \
    if (packet == NULL) break

static void handle_incoming_packet(struct hpsb_host *host, int tcode,
                                   quadlet_t *data, size_t size, int write_acked)
                                   quadlet_t *data, size_t size,
                                   int write_acked)
{
    struct hpsb_packet *packet;
    int length, rcode, extcode;

@@ -816,74 +871,72 @@ static void handle_incoming_packet(struct hpsb_host *host, int tcode,
    u16 flags = (u16) data[0];
    u64 addr;

    /* big FIXME - no error checking is done for an out of bounds length */
    /* FIXME?
     * Out-of-bounds lengths are left for highlevel_read|write to cap. */

    switch (tcode) {
    case TCODE_WRITEQ:
        addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
        rcode = highlevel_write(host, source, dest, data+3,
        rcode = highlevel_write(host, source, dest, data + 3,
                                addr, 4, flags);

        if (!write_acked
            && (NODEID_TO_NODE(data[0] >> 16) != NODE_MASK)
            && (rcode >= 0)) {
            /* not a broadcast write, reply */
            PREP_REPLY_PACKET(0);
            fill_async_write_resp(packet, rcode);
            send_packet_nocare(packet);
        }
        break;
        goto handle_write_request;

    case TCODE_WRITEB:
        addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
        rcode = highlevel_write(host, source, dest, data+4,
                                addr, data[3]>>16, flags);

        if (!write_acked
            && (NODEID_TO_NODE(data[0] >> 16) != NODE_MASK)
            && (rcode >= 0)) {
            /* not a broadcast write, reply */
            PREP_REPLY_PACKET(0);
        rcode = highlevel_write(host, source, dest, data + 4,
                                addr, data[3] >> 16, flags);
handle_write_request:
        if (rcode < 0 || write_acked ||
            NODEID_TO_NODE(data[0] >> 16) == NODE_MASK)
            return;
        /* not a broadcast write, reply */
        packet = create_reply_packet(host, data, 0);
        if (packet) {
            fill_async_write_resp(packet, rcode);
            send_packet_nocare(packet);
        }
        break;
        return;

    case TCODE_READQ:
        addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
        rcode = highlevel_read(host, source, &buffer, addr, 4, flags);
        if (rcode < 0)
            return;

        if (rcode >= 0) {
            PREP_REPLY_PACKET(0);
        packet = create_reply_packet(host, data, 0);
        if (packet) {
            fill_async_readquad_resp(packet, rcode, buffer);
            send_packet_nocare(packet);
        }
        break;
        return;

    case TCODE_READB:
        length = data[3] >> 16;
        PREP_REPLY_PACKET(length);
        packet = create_reply_packet(host, data, length);
        if (!packet)
            return;

        addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
        rcode = highlevel_read(host, source, packet->data, addr,
                               length, flags);

        if (rcode >= 0) {
            fill_async_readblock_resp(packet, rcode, length);
            send_packet_nocare(packet);
        } else {
        if (rcode < 0) {
            hpsb_free_packet(packet);
            return;
        }
        break;
        fill_async_readblock_resp(packet, rcode, length);
        send_packet_nocare(packet);
        return;

    case TCODE_LOCK_REQUEST:
        length = data[3] >> 16;
        extcode = data[3] & 0xffff;
        addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];

        PREP_REPLY_PACKET(8);
        packet = create_reply_packet(host, data, 8);
        if (!packet)
            return;

        if ((extcode == 0) || (extcode >= 7)) {
        if (extcode == 0 || extcode >= 7) {
            /* let switch default handle error */
            length = 0;
        }

@@ -891,12 +944,12 @@ static void handle_incoming_packet(struct hpsb_host *host, int tcode,
        switch (length) {
        case 4:
            rcode = highlevel_lock(host, source, packet->data, addr,
                                   data[4], 0, extcode,flags);
                                   data[4], 0, extcode, flags);
            fill_async_lock_resp(packet, rcode, extcode, 4);
            break;
        case 8:
            if ((extcode != EXTCODE_FETCH_ADD)
                && (extcode != EXTCODE_LITTLE_ADD)) {
            if (extcode != EXTCODE_FETCH_ADD &&
                extcode != EXTCODE_LITTLE_ADD) {
                rcode = highlevel_lock(host, source,
                                       packet->data, addr,
                                       data[5], data[4],

@@ -920,29 +973,38 @@ static void handle_incoming_packet(struct hpsb_host *host, int tcode,
            break;
        default:
            rcode = RCODE_TYPE_ERROR;
            fill_async_lock_resp(packet, rcode,
                                 extcode, 0);
            fill_async_lock_resp(packet, rcode, extcode, 0);
        }

        if (rcode >= 0) {
            send_packet_nocare(packet);
        } else {
        if (rcode < 0)
            hpsb_free_packet(packet);
        }
        break;
        else
            send_packet_nocare(packet);
        return;
    }

}
#undef PREP_REPLY_PACKET

/**
 * hpsb_packet_received - hand over received packet to the core
 *
 * For host driver module usage.
 *
 * The contents of data are expected to be the full packet but with the CRCs
 * left out (data block follows header immediately), with the header (i.e. the
 * first four quadlets) in machine byte order and the data block in big endian.
 * *@data can be safely overwritten after this call.
 *
 * If the packet is a write request, @write_acked is to be set to true if it was
 * ack_complete'd already, false otherwise. This argument is ignored for any
 * other packet type.
 */
void hpsb_packet_received(struct hpsb_host *host, quadlet_t *data, size_t size,
                          int write_acked)
{
    int tcode;

    if (host->in_bus_reset) {
        HPSB_INFO("received packet during reset; ignoring");
    if (unlikely(host->in_bus_reset)) {
        HPSB_DEBUG("received packet during reset; ignoring");
        return;
    }

@@ -976,23 +1038,27 @@ void hpsb_packet_received(struct hpsb_host *host, quadlet_t *data, size_t size,
        break;

    default:
        HPSB_NOTICE("received packet with bogus transaction code %d",
                    tcode);
        HPSB_DEBUG("received packet with bogus transaction code %d",
                   tcode);
        break;
    }
}

static void abort_requests(struct hpsb_host *host)
{
    struct hpsb_packet *packet;
    struct sk_buff *skb;
    struct hpsb_packet *packet, *p;
    struct list_head tmp;
    unsigned long flags;

    host->driver->devctl(host, CANCEL_REQUESTS, 0);

    while ((skb = skb_dequeue(&host->pending_packet_queue)) != NULL) {
        packet = (struct hpsb_packet *)skb->data;
    INIT_LIST_HEAD(&tmp);
    spin_lock_irqsave(&pending_packets_lock, flags);
    list_splice_init(&host->pending_packets, &tmp);
    spin_unlock_irqrestore(&pending_packets_lock, flags);

    list_for_each_entry_safe(packet, p, &tmp, queue) {
        list_del_init(&packet->queue);
        packet->state = hpsb_complete;
        packet->ack_code = ACKX_ABORTED;
        queue_packet_complete(packet);

@@ -1002,87 +1068,90 @@ static void abort_requests(struct hpsb_host *host)
void abort_timedouts(unsigned long __opaque)
{
    struct hpsb_host *host = (struct hpsb_host *)__opaque;
    unsigned long flags;
    struct hpsb_packet *packet;
    struct sk_buff *skb;
    unsigned long expire;
    struct hpsb_packet *packet, *p;
    struct list_head tmp;
    unsigned long flags, expire, j;

    spin_lock_irqsave(&host->csr.lock, flags);
    expire = host->csr.expire;
    spin_unlock_irqrestore(&host->csr.lock, flags);

    /* Hold the lock around this, since we aren't dequeuing all
     * packets, just ones we need. */
    spin_lock_irqsave(&host->pending_packet_queue.lock, flags);
    j = jiffies;
    INIT_LIST_HEAD(&tmp);
    spin_lock_irqsave(&pending_packets_lock, flags);

    while (!skb_queue_empty(&host->pending_packet_queue)) {
        skb = skb_peek(&host->pending_packet_queue);

        packet = (struct hpsb_packet *)skb->data;

        if (time_before(packet->sendtime + expire, jiffies)) {
            __skb_unlink(skb, &host->pending_packet_queue);
            packet->state = hpsb_complete;
            packet->ack_code = ACKX_TIMEOUT;
            queue_packet_complete(packet);
        } else {
    list_for_each_entry_safe(packet, p, &host->pending_packets, queue) {
        if (time_before(packet->sendtime + expire, j))
            list_move_tail(&packet->queue, &tmp);
        else
            /* Since packets are added to the tail, the oldest
             * ones are first, always. When we get to one that
             * isn't timed out, the rest aren't either. */
            break;
        }
    }
    if (!list_empty(&host->pending_packets))
        mod_timer(&host->timeout, j + host->timeout_interval);

    if (!skb_queue_empty(&host->pending_packet_queue))
        mod_timer(&host->timeout, jiffies + host->timeout_interval);
    spin_unlock_irqrestore(&pending_packets_lock, flags);

    spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags);
    list_for_each_entry_safe(packet, p, &tmp, queue) {
        list_del_init(&packet->queue);
        packet->state = hpsb_complete;
        packet->ack_code = ACKX_TIMEOUT;
        queue_packet_complete(packet);
    }
}

/* Kernel thread and vars, which handles packets that are completed. Only
 * packets that have a "complete" function are sent here. This way, the
 * completion is run out of kernel context, and doesn't block the rest of
 * the stack. */
static struct task_struct *khpsbpkt_thread;
static struct sk_buff_head hpsbpkt_queue;
static LIST_HEAD(hpsbpkt_queue);

static void queue_packet_complete(struct hpsb_packet *packet)
{
    unsigned long flags;

    if (packet->no_waiter) {
        hpsb_free_packet(packet);
        return;
    }
    if (packet->complete_routine != NULL) {
        skb_queue_tail(&hpsbpkt_queue, packet->skb);
        spin_lock_irqsave(&pending_packets_lock, flags);
        list_add_tail(&packet->queue, &hpsbpkt_queue);
        spin_unlock_irqrestore(&pending_packets_lock, flags);
        wake_up_process(khpsbpkt_thread);
    }
    return;
}

/*
 * Kernel thread which handles packets that are completed. This way the
 * packet's "complete" function is asynchronously run in process context.
 * Only packets which have a "complete" function may be sent here.
 */
static int hpsbpkt_thread(void *__hi)
{
    struct sk_buff *skb;
    struct hpsb_packet *packet;
    void (*complete_routine)(void*);
    void *complete_data;
    struct hpsb_packet *packet, *p;
    struct list_head tmp;
    int may_schedule;

    current->flags |= PF_NOFREEZE;

    while (!kthread_should_stop()) {
        while ((skb = skb_dequeue(&hpsbpkt_queue)) != NULL) {
            packet = (struct hpsb_packet *)skb->data;

            complete_routine = packet->complete_routine;
            complete_data = packet->complete_data;
        INIT_LIST_HEAD(&tmp);
        spin_lock_irq(&pending_packets_lock);
        list_splice_init(&hpsbpkt_queue, &tmp);
        spin_unlock_irq(&pending_packets_lock);

            packet->complete_routine = packet->complete_data = NULL;

            complete_routine(complete_data);
        list_for_each_entry_safe(packet, p, &tmp, queue) {
            list_del_init(&packet->queue);
            packet->complete_routine(packet->complete_data);
        }

        set_current_state(TASK_INTERRUPTIBLE);
        if (!skb_peek(&hpsbpkt_queue))
        spin_lock_irq(&pending_packets_lock);
        may_schedule = list_empty(&hpsbpkt_queue);
        spin_unlock_irq(&pending_packets_lock);
        if (may_schedule)
            schedule();
        __set_current_state(TASK_RUNNING);
    }

@@ -1093,8 +1162,6 @@ static int __init ieee1394_init(void)
{
    int i, ret;

    skb_queue_head_init(&hpsbpkt_queue);

    /* non-fatal error */
    if (hpsb_init_config_roms()) {
        HPSB_ERR("Failed to initialize some config rom entries.\n");

@@ -1268,7 +1335,6 @@ EXPORT_SYMBOL(hpsb_destroy_hostinfo);
EXPORT_SYMBOL(hpsb_set_hostinfo_key);
EXPORT_SYMBOL(hpsb_get_hostinfo_bykey);
EXPORT_SYMBOL(hpsb_set_hostinfo);
EXPORT_SYMBOL(highlevel_host_reset);

/** nodemgr.c **/
EXPORT_SYMBOL(hpsb_node_fill_packet);

@@ -1311,11 +1377,10 @@ EXPORT_SYMBOL(hpsb_iso_wake);
EXPORT_SYMBOL(hpsb_iso_recv_flush);

/** csr1212.c **/
EXPORT_SYMBOL(csr1212_new_directory);
EXPORT_SYMBOL(csr1212_attach_keyval_to_directory);
EXPORT_SYMBOL(csr1212_detach_keyval_from_directory);
EXPORT_SYMBOL(csr1212_release_keyval);
EXPORT_SYMBOL(csr1212_read);
EXPORT_SYMBOL(csr1212_get_keyval);
EXPORT_SYMBOL(csr1212_new_directory);
EXPORT_SYMBOL(csr1212_parse_keyval);
EXPORT_SYMBOL(_csr1212_read_keyval);
EXPORT_SYMBOL(_csr1212_destroy_keyval);
EXPORT_SYMBOL(csr1212_read);
EXPORT_SYMBOL(csr1212_release_keyval);

@@ -4,7 +4,6 @@

#include <linux/device.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <asm/atomic.h>

@@ -13,7 +12,7 @@

struct hpsb_packet {
    /* This struct is basically read-only for hosts with the exception of
     * the data buffer contents and xnext - see below. */
     * the data buffer contents and driver_list. */

    /* This can be used for host driver internal linking.
     *

@@ -49,134 +48,65 @@ struct hpsb_packet {
    /* Speed to transmit with: 0 = 100Mbps, 1 = 200Mbps, 2 = 400Mbps */
    unsigned speed_code:2;

    /*
     * *header and *data are guaranteed to be 32-bit DMAable and may be
     * overwritten to allow in-place byte swapping. Neither of these is
     * CRCed (the sizes also don't include CRC), but contain space for at
     * least one additional quadlet to allow in-place CRCing. The memory is
     * also guaranteed to be DMA mappable.
     */
    quadlet_t *header;
    quadlet_t *data;
    size_t header_size;
    size_t data_size;

    struct hpsb_host *host;
    unsigned int generation;

    atomic_t refcnt;
    struct list_head queue;

    /* Function (and possible data to pass to it) to call when this
     * packet is completed. */
    void (*complete_routine)(void *);
    void *complete_data;

    /* XXX This is just a hack at the moment */
    struct sk_buff *skb;

    /* Store jiffies for implementing bus timeouts. */
    unsigned long sendtime;

    quadlet_t embedded_header[5];
    /* Sizes are in bytes. *data can be DMA-mapped. */
    size_t allocated_data_size;	/* as allocated */
    size_t data_size;		/* as filled in */
    size_t header_size;		/* as filled in, not counting the CRC */
    quadlet_t *data;
    quadlet_t header[5];
    quadlet_t embedded_data[0];	/* keep as last member */
};

/* Set a task for when a packet completes */
void hpsb_set_packet_complete_task(struct hpsb_packet *packet,
                                   void (*routine)(void *), void *data);

static inline struct hpsb_packet *driver_packet(struct list_head *l)
{
    return list_entry(l, struct hpsb_packet, driver_list);
}

void abort_timedouts(unsigned long __opaque);

struct hpsb_packet *hpsb_alloc_packet(size_t data_size);
void hpsb_free_packet(struct hpsb_packet *packet);

/*
 * Generation counter for the complete 1394 subsystem. Generation gets
 * incremented on every change in the subsystem (e.g. bus reset).
/**
 * get_hpsb_generation - generation counter for the complete 1394 subsystem
 *
 * Use the functions, not the variable.
 * Generation gets incremented on every change in the subsystem (notably on bus
 * resets). Use the functions, not the variable.
 */
static inline unsigned int get_hpsb_generation(struct hpsb_host *host)
{
    return atomic_read(&host->generation);
}

/*
 * Send a PHY configuration packet, return 0 on success, negative
 * errno on failure.
 */
int hpsb_send_phy_config(struct hpsb_host *host, int rootid, int gapcnt);

/*
 * Queue packet for transmitting, return 0 on success, negative errno
 * on failure.
 */
int hpsb_send_packet(struct hpsb_packet *packet);

/*
 * Queue packet for transmitting, and block until the transaction
 * completes. Return 0 on success, negative errno on failure.
 */
int hpsb_send_packet_and_wait(struct hpsb_packet *packet);

/* Initiate bus reset on the given host. Returns 1 if bus reset already in
 * progress, 0 otherwise. */
int hpsb_reset_bus(struct hpsb_host *host, int type);

int hpsb_read_cycle_timer(struct hpsb_host *host, u32 *cycle_timer,
                          u64 *local_time);

/*
 * The following functions are exported for host driver module usage. All of
 * them are safe to use in interrupt contexts, although some are quite
 * complicated so you may want to run them in bottom halves instead of calling
 * them directly.
 */

/* Notify a bus reset to the core. Returns 1 if bus reset already in progress,
 * 0 otherwise. */
int hpsb_bus_reset(struct hpsb_host *host);

/*
 * Hand over received selfid packet to the core. Complement check (second
 * quadlet is complement of first) is expected to be done and successful.
 */
void hpsb_selfid_received(struct hpsb_host *host, quadlet_t sid);

/*
 * Notify completion of SelfID stage to the core and report new physical ID
 * and whether host is root now.
 */
void hpsb_selfid_complete(struct hpsb_host *host, int phyid, int isroot);

/*
 * Notify core of sending a packet. Ackcode is the ack code returned for async
 * transmits or ACKX_SEND_ERROR if the transmission failed completely; ACKX_NONE
 * for other cases (internal errors that don't justify a panic). Safe to call
 * from within a transmit packet routine.
 */
void hpsb_packet_sent(struct hpsb_host *host, struct hpsb_packet *packet,
                      int ackcode);

/*
 * Hand over received packet to the core. The contents of data are expected to
 * be the full packet but with the CRCs left out (data block follows header
 * immediately), with the header (i.e. the first four quadlets) in machine byte
 * order and the data block in big endian. *data can be safely overwritten
 * after this call.
 *
 * If the packet is a write request, write_acked is to be set to true if it was
 * ack_complete'd already, false otherwise. This arg is ignored for any other
 * packet type.
 */
void hpsb_packet_received(struct hpsb_host *host, quadlet_t *data, size_t size,
                          int write_acked);

/*
 * CHARACTER DEVICE DISPATCHING
 *

@@ -217,7 +147,9 @@ void hpsb_packet_received(struct hpsb_host *host, quadlet_t *data, size_t size,
#define IEEE1394_EXPERIMENTAL_DEV MKDEV(IEEE1394_MAJOR, \
                                        IEEE1394_MINOR_BLOCK_EXPERIMENTAL * 16)

/* return the index (within a minor number block) of a file */
/**
 * ieee1394_file_to_instance - get the index within a minor number block
 */
static inline unsigned char ieee1394_file_to_instance(struct file *file)
{
    return file->f_path.dentry->d_inode->i_cindex;

@@ -10,11 +10,16 @@
 */

#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/sched.h>	/* because linux/wait.h is broken if CONFIG_SMP=n */
#include <linux/wait.h>

#include <asm/bug.h>
#include <asm/errno.h>
#include <asm/system.h>

#include "ieee1394.h"
#include "ieee1394_types.h"

@@ -32,7 +37,7 @@
#ifndef HPSB_DEBUG_TLABELS
static
#endif
spinlock_t hpsb_tlabel_lock = SPIN_LOCK_UNLOCKED;
DEFINE_SPINLOCK(hpsb_tlabel_lock);

static DECLARE_WAIT_QUEUE_HEAD(tlabel_wq);

@@ -212,6 +217,15 @@ void hpsb_free_tlabel(struct hpsb_packet *packet)
    wake_up_interruptible(&tlabel_wq);
}

/**
 * hpsb_packet_success - Make sense of the ack and reply codes
 *
 * Make sense of the ack and reply codes and return more convenient error codes:
 * 0 = success. -%EBUSY = node is busy, try again. -%EAGAIN = error which can
 * probably be resolved by retry. -%EREMOTEIO = node suffers from an internal
 * error. -%EACCES = this transaction is not allowed on requested address.
 * -%EINVAL = invalid address at node.
 */
int hpsb_packet_success(struct hpsb_packet *packet)
{
    switch (packet->ack_code) {
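A small sketch, not part of this patch, of how a caller might act on the return codes listed in the kerneldoc above; the "retry" decision is the caller's.

/* Hypothetical example: classify the result of a finished transaction. */
static int example_check_result(struct hpsb_packet *packet)
{
    int ret = hpsb_packet_success(packet);

    switch (ret) {
    case 0:
        return 0;		/* transaction succeeded */
    case -EBUSY:
    case -EAGAIN:
        return 1;		/* caller may rebuild the request and retry */
    default:
        return ret;		/* -EREMOTEIO, -EACCES, -EINVAL: give up */
    }
}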
@@ -364,6 +378,13 @@ struct hpsb_packet *hpsb_make_streampacket(struct hpsb_host *host, u8 * buffer,
    }
    packet->host = host;

    /* Because it is too difficult to determine all PHY speeds and link
     * speeds here, we use S100... */
    packet->speed_code = IEEE1394_SPEED_100;

    /* ...and prevent hpsb_send_packet() from overriding it. */
    packet->node_id = LOCAL_BUS | ALL_NODES;

    if (hpsb_get_tlabel(packet)) {
        hpsb_free_packet(packet);
        return NULL;

@@ -493,6 +514,16 @@ struct hpsb_packet *hpsb_make_isopacket(struct hpsb_host *host,
 * avoid in kernel buffers for user space callers
 */

/**
 * hpsb_read - generic read function
 *
 * Recognizes the local node ID and acts accordingly. Automatically uses a
 * quadlet read request if @length == 4 and a block read request otherwise.
 * It does not yet support lengths that are not a multiple of 4.
 *
 * You must explicitly specify the @generation for which the node ID is valid,
 * to avoid sending packets to the wrong nodes when we race with a bus reset.
 */
int hpsb_read(struct hpsb_host *host, nodeid_t node, unsigned int generation,
              u64 addr, quadlet_t * buffer, size_t length)
{
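A minimal sketch of the generation-pinned quadlet read described above; it is not part of this patch, and it assumes the node ID passed in is valid for the generation sampled here.

/* Hypothetical example: read one quadlet from a remote node. */
static int example_read_quadlet(struct hpsb_host *host, nodeid_t node, u64 addr,
                                quadlet_t *value)
{
    unsigned int generation = get_hpsb_generation(host);

    /* length == 4, so this becomes a quadlet read request */
    return hpsb_read(host, node, generation, addr, value, 4);
}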
@@ -532,6 +563,16 @@ int hpsb_read(struct hpsb_host *host, nodeid_t node, unsigned int generation,
    return retval;
}

/**
 * hpsb_write - generic write function
 *
 * Recognizes the local node ID and acts accordingly. Automatically uses a
 * quadlet write request if @length == 4 and a block write request
 * otherwise. It does not yet support lengths that are not a multiple of 4.
 *
 * You must explicitly specify the @generation for which the node ID is valid,
 * to avoid sending packets to the wrong nodes when we race with a bus reset.
 */
int hpsb_write(struct hpsb_host *host, nodeid_t node, unsigned int generation,
               u64 addr, quadlet_t * buffer, size_t length)
{

@@ -27,27 +27,7 @@ struct hpsb_packet *hpsb_make_writepacket(struct hpsb_host *host,
struct hpsb_packet *hpsb_make_streampacket(struct hpsb_host *host, u8 *buffer,
                                           int length, int channel, int tag,
                                           int sync);

/*
 * hpsb_packet_success - Make sense of the ack and reply codes and
 * return more convenient error codes:
 * 0           success
 * -EBUSY      node is busy, try again
 * -EAGAIN     error which can probably be resolved by retry
 * -EREMOTEIO  node suffers from an internal error
 * -EACCES     this transaction is not allowed on requested address
 * -EINVAL     invalid address at node
 */
int hpsb_packet_success(struct hpsb_packet *packet);

/*
 * The generic read and write functions. All recognize the local node ID
 * and act accordingly. Read and write automatically use quadlet commands if
 * length == 4 and block commands otherwise (however, they do not yet
 * support lengths that are not a multiple of 4). You must explicitly specify
 * the generation for which the node ID is valid, to avoid sending packets to
 * the wrong nodes when we race with a bus reset.
 */
int hpsb_read(struct hpsb_host *host, nodeid_t node, unsigned int generation,
              u64 addr, quadlet_t *buffer, size_t length);
int hpsb_write(struct hpsb_host *host, nodeid_t node, unsigned int generation,

@@ -10,11 +10,15 @@
 */

#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include "hosts.h"
#include "iso.h"

/**
 * hpsb_iso_stop - stop DMA
 */
void hpsb_iso_stop(struct hpsb_iso *iso)
{
    if (!(iso->flags & HPSB_ISO_DRIVER_STARTED))

@@ -25,6 +29,9 @@ void hpsb_iso_stop(struct hpsb_iso *iso)
    iso->flags &= ~HPSB_ISO_DRIVER_STARTED;
}

/**
 * hpsb_iso_shutdown - deallocate buffer and DMA context
 */
void hpsb_iso_shutdown(struct hpsb_iso *iso)
{
    if (iso->flags & HPSB_ISO_DRIVER_INIT) {

@@ -130,6 +137,9 @@ static struct hpsb_iso *hpsb_iso_common_init(struct hpsb_host *host,
    return NULL;
}

/**
 * hpsb_iso_n_ready - returns number of packets ready to send or receive
 */
int hpsb_iso_n_ready(struct hpsb_iso *iso)
{
    unsigned long flags;

@@ -142,6 +152,9 @@ int hpsb_iso_n_ready(struct hpsb_iso *iso)
    return val;
}

/**
 * hpsb_iso_xmit_init - allocate the buffer and DMA context
 */
struct hpsb_iso *hpsb_iso_xmit_init(struct hpsb_host *host,
                                    unsigned int data_buf_size,
                                    unsigned int buf_packets,

@@ -172,6 +185,11 @@ struct hpsb_iso *hpsb_iso_xmit_init(struct hpsb_host *host,
    return NULL;
}

/**
 * hpsb_iso_recv_init - allocate the buffer and DMA context
 *
 * Note, if channel = -1, multi-channel receive is enabled.
 */
struct hpsb_iso *hpsb_iso_recv_init(struct hpsb_host *host,
                                    unsigned int data_buf_size,
                                    unsigned int buf_packets,

@@ -199,6 +217,11 @@ struct hpsb_iso *hpsb_iso_recv_init(struct hpsb_host *host,
    return NULL;
}

/**
 * hpsb_iso_recv_listen_channel
 *
 * multi-channel only
 */
int hpsb_iso_recv_listen_channel(struct hpsb_iso *iso, unsigned char channel)
{
    if (iso->type != HPSB_ISO_RECV || iso->channel != -1 || channel >= 64)

@@ -206,6 +229,11 @@ int hpsb_iso_recv_listen_channel(struct hpsb_iso *iso, unsigned char channel)
    return iso->host->driver->isoctl(iso, RECV_LISTEN_CHANNEL, channel);
}

/**
 * hpsb_iso_recv_unlisten_channel
 *
 * multi-channel only
 */
int hpsb_iso_recv_unlisten_channel(struct hpsb_iso *iso, unsigned char channel)
{
    if (iso->type != HPSB_ISO_RECV || iso->channel != -1 || channel >= 64)

@@ -213,6 +241,11 @@ int hpsb_iso_recv_unlisten_channel(struct hpsb_iso *iso, unsigned char channel)
    return iso->host->driver->isoctl(iso, RECV_UNLISTEN_CHANNEL, channel);
}

/**
 * hpsb_iso_recv_set_channel_mask
 *
 * multi-channel only
 */
int hpsb_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask)
{
    if (iso->type != HPSB_ISO_RECV || iso->channel != -1)

@@ -221,6 +254,12 @@ int hpsb_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask)
                                     (unsigned long)&mask);
}

/**
 * hpsb_iso_recv_flush - check for arrival of new packets
 *
 * check for arrival of new packets immediately (even if irq_interval
 * has not yet been reached)
 */
int hpsb_iso_recv_flush(struct hpsb_iso *iso)
{
    if (iso->type != HPSB_ISO_RECV)

@@ -238,6 +277,9 @@ static int do_iso_xmit_start(struct hpsb_iso *iso, int cycle)
    return retval;
}

/**
 * hpsb_iso_xmit_start - start DMA
 */
int hpsb_iso_xmit_start(struct hpsb_iso *iso, int cycle, int prebuffer)
{
    if (iso->type != HPSB_ISO_XMIT)

@@ -270,6 +312,9 @@ int hpsb_iso_xmit_start(struct hpsb_iso *iso, int cycle, int prebuffer)
    return 0;
}

/**
 * hpsb_iso_recv_start - start DMA
 */
int hpsb_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync)
{
    int retval = 0;

@@ -306,8 +351,7 @@ int hpsb_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync)
}

/* check to make sure the user has not supplied bogus values of offset/len
   that would cause the kernel to access memory outside the buffer */
 * that would cause the kernel to access memory outside the buffer */
static int hpsb_iso_check_offset_len(struct hpsb_iso *iso,
                                     unsigned int offset, unsigned short len,
                                     unsigned int *out_offset,

@@ -331,6 +375,12 @@ static int hpsb_iso_check_offset_len(struct hpsb_iso *iso,
    return 0;
}

/**
 * hpsb_iso_xmit_queue_packet - queue a packet for transmission.
 *
 * @offset is relative to the beginning of the DMA buffer, where the packet's
 * data payload should already have been placed.
 */
int hpsb_iso_xmit_queue_packet(struct hpsb_iso *iso, u32 offset, u16 len,
                               u8 tag, u8 sy)
{
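A sketch of the transmit path described above, not part of this patch: it assumes the payload has already been copied into the DMA buffer at @offset, that tag/sy of 0 are acceptable, and that passing cycle == -1 to hpsb_iso_xmit_start() means "start as soon as possible".

/* Hypothetical example: queue one prepared packet and start transmit DMA. */
static int example_xmit_one(struct hpsb_iso *iso, u32 offset, u16 len)
{
    int ret = hpsb_iso_xmit_queue_packet(iso, offset, len, 0, 0);	/* tag 0, sy 0 */

    if (ret)
        return ret;
    return hpsb_iso_xmit_start(iso, -1, 0);	/* cycle and prebuffer assumed */
}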
@@ -380,6 +430,9 @@ int hpsb_iso_xmit_queue_packet(struct hpsb_iso *iso, u32 offset, u16 len,
    return rv;
}

/**
 * hpsb_iso_xmit_sync - wait until all queued packets have been transmitted
 */
int hpsb_iso_xmit_sync(struct hpsb_iso *iso)
{
    if (iso->type != HPSB_ISO_XMIT)

@@ -390,6 +443,15 @@ int hpsb_iso_xmit_sync(struct hpsb_iso *iso)
                              iso->buf_packets);
}

/**
 * hpsb_iso_packet_sent
 *
 * Available to low-level drivers.
 *
 * Call after a packet has been transmitted to the bus (interrupt context is
 * OK). @cycle is the _exact_ cycle the packet was sent on. @error should be
 * non-zero if some sort of error occurred when sending the packet.
 */
void hpsb_iso_packet_sent(struct hpsb_iso *iso, int cycle, int error)
{
    unsigned long flags;

@@ -413,6 +475,13 @@ void hpsb_iso_packet_sent(struct hpsb_iso *iso, int cycle, int error)
    spin_unlock_irqrestore(&iso->lock, flags);
}

/**
 * hpsb_iso_packet_received
 *
 * Available to low-level drivers.
 *
 * Call after a packet has been received (interrupt context is OK).
 */
void hpsb_iso_packet_received(struct hpsb_iso *iso, u32 offset, u16 len,
                              u16 total_len, u16 cycle, u8 channel, u8 tag,
                              u8 sy)

@@ -442,6 +511,11 @@ void hpsb_iso_packet_received(struct hpsb_iso *iso, u32 offset, u16 len,
    spin_unlock_irqrestore(&iso->lock, flags);
}

/**
 * hpsb_iso_recv_release_packets - release packets, reuse buffer
 *
 * @n_packets have been read out of the buffer, re-use the buffer space
 */
int hpsb_iso_recv_release_packets(struct hpsb_iso *iso, unsigned int n_packets)
{
    unsigned long flags;
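A sketch of a receive-side callback using the two calls above, not part of this patch; how the payload of each ready packet is actually read out is driver-specific and elided.

/* Hypothetical example: drain everything that is ready, then hand the
 * buffer space back to the receive DMA context. */
static void example_recv_callback(struct hpsb_iso *iso)
{
    int n = hpsb_iso_n_ready(iso);

    /* ... read the n ready packets out of the DMA buffer here ... */

    hpsb_iso_recv_release_packets(iso, n);
}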
@ -477,6 +551,13 @@ int hpsb_iso_recv_release_packets(struct hpsb_iso *iso, unsigned int n_packets)
|
|||
return rv;
|
||||
}
|
||||
|
||||
/**
|
||||
* hpsb_iso_wake
|
||||
*
|
||||
* Available to low-level drivers.
|
||||
*
|
||||
* Call to wake waiting processes after buffer space has opened up.
|
||||
*/
|
||||
void hpsb_iso_wake(struct hpsb_iso *iso)
|
||||
{
|
||||
wake_up_interruptible(&iso->waitq);
|
||||
|
|
|
@@ -150,8 +150,6 @@ struct hpsb_iso {

/* functions available to high-level drivers (e.g. raw1394) */

/* allocate the buffer and DMA context */
struct hpsb_iso* hpsb_iso_xmit_init(struct hpsb_host *host,
                                    unsigned int data_buf_size,
                                    unsigned int buf_packets,
@@ -159,8 +157,6 @@ struct hpsb_iso* hpsb_iso_xmit_init(struct hpsb_host *host,
                                    int speed,
                                    int irq_interval,
                                    void (*callback)(struct hpsb_iso*));

/* note: if channel = -1, multi-channel receive is enabled */
struct hpsb_iso* hpsb_iso_recv_init(struct hpsb_host *host,
                                    unsigned int data_buf_size,
                                    unsigned int buf_packets,
@@ -168,56 +164,29 @@ struct hpsb_iso* hpsb_iso_recv_init(struct hpsb_host *host,
                                    int dma_mode,
                                    int irq_interval,
                                    void (*callback)(struct hpsb_iso*));

/* multi-channel only */
int hpsb_iso_recv_listen_channel(struct hpsb_iso *iso, unsigned char channel);
int hpsb_iso_recv_unlisten_channel(struct hpsb_iso *iso, unsigned char channel);
int hpsb_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask);

/* start/stop DMA */
int hpsb_iso_xmit_start(struct hpsb_iso *iso, int start_on_cycle,
                        int prebuffer);
int hpsb_iso_recv_start(struct hpsb_iso *iso, int start_on_cycle,
                        int tag_mask, int sync);
void hpsb_iso_stop(struct hpsb_iso *iso);

/* deallocate buffer and DMA context */
void hpsb_iso_shutdown(struct hpsb_iso *iso);

/* queue a packet for transmission.
 * 'offset' is relative to the beginning of the DMA buffer, where the packet's
 * data payload should already have been placed. */
int hpsb_iso_xmit_queue_packet(struct hpsb_iso *iso, u32 offset, u16 len,
                               u8 tag, u8 sy);

/* wait until all queued packets have been transmitted to the bus */
int hpsb_iso_xmit_sync(struct hpsb_iso *iso);

/* N packets have been read out of the buffer, re-use the buffer space */
int hpsb_iso_recv_release_packets(struct hpsb_iso *recv,
                                  unsigned int n_packets);

/* check for arrival of new packets immediately (even if irq_interval
 * has not yet been reached) */
int hpsb_iso_recv_release_packets(struct hpsb_iso *recv,
                                  unsigned int n_packets);
int hpsb_iso_recv_flush(struct hpsb_iso *iso);

/* returns # of packets ready to send or receive */
int hpsb_iso_n_ready(struct hpsb_iso *iso);

/* the following are callbacks available to low-level drivers */

/* call after a packet has been transmitted to the bus (interrupt context is OK)
 * 'cycle' is the _exact_ cycle the packet was sent on
 * 'error' should be non-zero if some sort of error occurred when sending the
 * packet */
void hpsb_iso_packet_sent(struct hpsb_iso *iso, int cycle, int error);

/* call after a packet has been received (interrupt context OK) */
void hpsb_iso_packet_received(struct hpsb_iso *iso, u32 offset, u16 len,
                              u16 total_len, u16 cycle, u8 channel, u8 tag,
                              u8 sy);

/* call to wake waiting processes after buffer space has opened up. */
void hpsb_iso_wake(struct hpsb_iso *iso);

#endif /* IEEE1394_ISO_H */

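The receive-side declarations above follow the same pattern. A minimal receive sketch, again not part of the patch (buffer sizes, channel, the dma_mode value and the tag/sync choices are assumptions):

/* Illustrative sketch only.  dma_mode 0 stands in for the host driver's
 * default receive mode; real callers pass one of the iso.h dma-mode values. */
static void example_recv_ready(struct hpsb_iso *iso)
{
    int n = hpsb_iso_n_ready(iso);          /* packets now sitting in the buffer */

    /* ... read the packets out of iso's DMA buffer here ... */

    hpsb_iso_recv_release_packets(iso, n);  /* give the space back */
}

static struct hpsb_iso *example_recv_start(struct hpsb_host *host)
{
    struct hpsb_iso *iso;

    /* 64 KiB buffer, 256 packet slots, channel 5, interrupt every 16 packets */
    iso = hpsb_iso_recv_init(host, 64 * 1024, 256, 5, 0 /* dma_mode */, 16,
                             example_recv_ready);
    if (!iso)
        return NULL;

    /* any tag, no sync match, start on the next possible cycle */
    if (hpsb_iso_recv_start(iso, -1, -1, 0) < 0) {
        hpsb_iso_shutdown(iso);
        return NULL;
    }
    return iso;
}
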
@@ -16,6 +16,7 @@
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/freezer.h>
#include <asm/atomic.h>

@@ -115,7 +116,7 @@ static int nodemgr_bus_read(struct csr1212_csr *csr, u64 addr, u16 length,

static int nodemgr_get_max_rom(quadlet_t *bus_info_data, void *__ci)
{
    return (CSR1212_BE32_TO_CPU(bus_info_data[2]) >> 8) & 0x3;
    return (be32_to_cpu(bus_info_data[2]) >> 8) & 0x3;
}

static struct csr1212_bus_ops nodemgr_csr_ops = {

@@ -580,7 +581,7 @@ static void nodemgr_create_drv_files(struct hpsb_protocol_driver *driver)
        goto fail;
    return;
fail:
    HPSB_ERR("Failed to add sysfs attribute for driver %s", driver->name);
    HPSB_ERR("Failed to add sysfs attribute");
}

@@ -604,8 +605,7 @@ static void nodemgr_create_ne_dev_files(struct node_entry *ne)
        goto fail;
    return;
fail:
    HPSB_ERR("Failed to add sysfs attribute for node %016Lx",
             (unsigned long long)ne->guid);
    HPSB_ERR("Failed to add sysfs attribute");
}

@@ -619,7 +619,7 @@ static void nodemgr_create_host_dev_files(struct hpsb_host *host)
        goto fail;
    return;
fail:
    HPSB_ERR("Failed to add sysfs attribute for host %d", host->id);
    HPSB_ERR("Failed to add sysfs attribute");
}

@@ -679,8 +679,7 @@ static void nodemgr_create_ud_dev_files(struct unit_directory *ud)
    }
    return;
fail:
    HPSB_ERR("Failed to add sysfs attributes for unit %s",
             ud->device.bus_id);
    HPSB_ERR("Failed to add sysfs attribute");
}

@@ -1144,13 +1143,13 @@ static void nodemgr_process_root_directory(struct host_info *hi, struct node_ent
            last_key_id = kv->key.id;
    }

    if (ne->vendor_name_kv &&
        device_create_file(&ne->device, &dev_attr_ne_vendor_name_kv))
        goto fail;
    return;
fail:
    HPSB_ERR("Failed to add sysfs attribute for node %016Lx",
             (unsigned long long)ne->guid);
    if (ne->vendor_name_kv) {
        int error = device_create_file(&ne->device,
                                       &dev_attr_ne_vendor_name_kv);

        if (error && error != -EEXIST)
            HPSB_ERR("Failed to add sysfs attribute");
    }
}

#ifdef CONFIG_HOTPLUG

@@ -1738,7 +1737,19 @@ exit:
    return 0;
}

int nodemgr_for_each_host(void *__data, int (*cb)(struct hpsb_host *, void *))
/**
 * nodemgr_for_each_host - call a function for each IEEE 1394 host
 * @data: an address to supply to the callback
 * @cb: function to call for each host
 *
 * Iterate the hosts, calling a given function with supplied data for each host.
 * If the callback fails on a host, i.e. if it returns a non-zero value, the
 * iteration is stopped.
 *
 * Return value: 0 on success, non-zero on failure (same as returned by last run
 * of the callback).
 */
int nodemgr_for_each_host(void *data, int (*cb)(struct hpsb_host *, void *))
{
    struct class_device *cdev;
    struct hpsb_host *host;

@@ -1748,7 +1759,7 @@ int nodemgr_for_each_host(void *__data, int (*cb)(struct hpsb_host *, void *))
    list_for_each_entry(cdev, &hpsb_host_class.children, node) {
        host = container_of(cdev, struct hpsb_host, class_dev);

        if ((error = cb(host, __data)))
        if ((error = cb(host, data)))
            break;
    }
    up(&hpsb_host_class.sem);

@@ -1756,7 +1767,7 @@ int nodemgr_for_each_host(void *__data, int (*cb)(struct hpsb_host *, void *))
    return error;
}

/* The following four convenience functions use a struct node_entry
/* The following two convenience functions use a struct node_entry
 * for addressing a node on the bus. They are intended for use by any
 * process context, not just the nodemgr thread, so we need to be a
 * little careful when reading out the node ID and generation. The

@@ -1771,12 +1782,20 @@ int nodemgr_for_each_host(void *__data, int (*cb)(struct hpsb_host *, void *))
 * ID's.
 */

void hpsb_node_fill_packet(struct node_entry *ne, struct hpsb_packet *pkt)
/**
 * hpsb_node_fill_packet - fill some destination information into a packet
 * @ne: destination node
 * @packet: packet to fill in
 *
 * This will fill in the given, pre-initialised hpsb_packet with the current
 * information from the node entry (host, node ID, bus generation number).
 */
void hpsb_node_fill_packet(struct node_entry *ne, struct hpsb_packet *packet)
{
    pkt->host = ne->host;
    pkt->generation = ne->generation;
    packet->host = ne->host;
    packet->generation = ne->generation;
    barrier();
    pkt->node_id = ne->nodeid;
    packet->node_id = ne->nodeid;
}

int hpsb_node_write(struct node_entry *ne, u64 addr,

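The new kerneldoc for nodemgr_for_each_host() above spells out its contract: the callback's first non-zero return value stops the iteration and becomes the return value. A small sketch of a caller (the callback and the counter are invented purely for illustration):

/* Illustrative only; count_host() and its counter exist just for this sketch. */
static int count_host(struct hpsb_host *host, void *data)
{
    (*(int *)data)++;
    return 0;   /* returning non-zero here would stop the iteration */
}

static int example_count_hosts(void)
{
    int n = 0;

    if (nodemgr_for_each_host(&n, count_host))
        return -EIO;    /* a callback failed (cannot happen in this sketch) */
    return n;
}
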
@@ -153,30 +153,10 @@ static inline int hpsb_node_entry_valid(struct node_entry *ne)
{
    return ne->generation == get_hpsb_generation(ne->host);
}

/*
 * This will fill in the given, pre-initialised hpsb_packet with the current
 * information from the node entry (host, node ID, generation number). It will
 * return false if the node owning the GUID is not accessible (and not modify
 * the hpsb_packet) and return true otherwise.
 *
 * Note that packet sending may still fail in hpsb_send_packet if a bus reset
 * happens while you are trying to set up the packet (due to obsolete generation
 * number). It will at least reliably fail so that you don't accidentally and
 * unknowingly send your packet to the wrong node.
 */
void hpsb_node_fill_packet(struct node_entry *ne, struct hpsb_packet *pkt);

int hpsb_node_read(struct node_entry *ne, u64 addr,
                   quadlet_t *buffer, size_t length);
void hpsb_node_fill_packet(struct node_entry *ne, struct hpsb_packet *packet);
int hpsb_node_write(struct node_entry *ne, u64 addr,
                    quadlet_t *buffer, size_t length);
int hpsb_node_lock(struct node_entry *ne, u64 addr,
                   int extcode, quadlet_t *data, quadlet_t arg);

/* Iterate the hosts, calling a given function with supplied data for each
 * host. */
int nodemgr_for_each_host(void *__data, int (*cb)(struct hpsb_host *, void *));
int nodemgr_for_each_host(void *data, int (*cb)(struct hpsb_host *, void *));

int init_ieee1394_nodemgr(void);
void cleanup_ieee1394_nodemgr(void);

@@ -507,9 +507,8 @@ static void ohci_initialize(struct ti_ohci *ohci)
    /* Set up self-id dma buffer */
    reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->selfid_buf_bus);

    /* enable self-id and phys */
    reg_write(ohci, OHCI1394_LinkControlSet, OHCI1394_LinkControl_RcvSelfID |
              OHCI1394_LinkControl_RcvPhyPkt);
    /* enable self-id */
    reg_write(ohci, OHCI1394_LinkControlSet, OHCI1394_LinkControl_RcvSelfID);

    /* Set the Config ROM mapping register */
    reg_write(ohci, OHCI1394_ConfigROMmap, ohci->csr_config_rom_bus);

@@ -518,9 +517,6 @@ static void ohci_initialize(struct ti_ohci *ohci)
    ohci->max_packet_size =
        1<<(((reg_read(ohci, OHCI1394_BusOptions)>>12)&0xf)+1);

    /* Don't accept phy packets into AR request context */
    reg_write(ohci, OHCI1394_LinkControlClear, 0x00000400);

    /* Clear the interrupt mask */
    reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
    reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);

@@ -617,7 +613,7 @@ static void ohci_initialize(struct ti_ohci *ohci)
#endif

        PRINT(KERN_DEBUG, "Serial EEPROM has suspicious values, "
              "attempting to setting max_packet_size to 512 bytes");
              "attempting to set max_packet_size to 512 bytes");
        reg_write(ohci, OHCI1394_BusOptions,
                  (reg_read(ohci, OHCI1394_BusOptions) & 0xf007) | 0x8002);
        ohci->max_packet_size = 512;

@@ -2377,6 +2373,7 @@ static irqreturn_t ohci_irq_handler(int irq, void *dev_id)
    if (event & OHCI1394_postedWriteErr) {
        PRINT(KERN_ERR, "physical posted write error");
        /* no recovery strategy yet, had to involve protocol drivers */
        event &= ~OHCI1394_postedWriteErr;
    }
    if (event & OHCI1394_cycleTooLong) {
        if(printk_ratelimit())

@@ -3658,6 +3655,7 @@ static struct pci_driver ohci1394_pci_driver = {
/* essentially the only purpose of this code is to allow another
   module to hook into ohci's interrupt handler */

/* returns zero if successful, one if DMA context is locked up */
int ohci1394_stop_context(struct ti_ohci *ohci, int reg, char *msg)
{
    int i=0;

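The comment added above ohci1394_stop_context() states its whole contract: 0 on success, 1 if the DMA context refuses to stop. A hedged sketch of how a caller is expected to act on that (the register offset and message text are placeholders, not taken from the driver):

/* Illustrative only; 'reg' must be the ContextControlClear register offset of
 * the context being stopped, which real callers already know. */
static void example_stop_context(struct ti_ohci *ohci, int reg)
{
    if (ohci1394_stop_context(ohci, reg, "example context"))
        printk(KERN_ERR "example: DMA context is locked up\n");
}
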
@@ -461,9 +461,7 @@ int ohci1394_register_iso_tasklet(struct ti_ohci *ohci,
                                  struct ohci1394_iso_tasklet *tasklet);
void ohci1394_unregister_iso_tasklet(struct ti_ohci *ohci,
                                     struct ohci1394_iso_tasklet *tasklet);

/* returns zero if successful, one if DMA context is locked up */
int ohci1394_stop_context (struct ti_ohci *ohci, int reg, char *msg);
int ohci1394_stop_context(struct ti_ohci *ohci, int reg, char *msg);
struct ti_ohci *ohci1394_get_struct(int card_num);

#endif

@@ -938,7 +938,8 @@ static int handle_async_send(struct file_info *fi, struct pending_request *req)
    int header_length = req->req.misc & 0xffff;
    int expect_response = req->req.misc >> 16;

    if ((header_length > req->req.length) || (header_length < 12)) {
    if (header_length > req->req.length || header_length < 12 ||
        header_length > FIELD_SIZEOF(struct hpsb_packet, header)) {
        req->req.error = RAW1394_ERROR_INVALID_ARG;
        req->req.length = 0;
        queue_complete_req(req);

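The added bound rejects user-supplied header lengths larger than the header array inside struct hpsb_packet. FIELD_SIZEOF() is the <linux/kernel.h> helper that yields a member's size without needing an instance; spelled out for reference:

/* What the added check relies on: FIELD_SIZEOF(t, f) is defined in
 * <linux/kernel.h> as the size of member f of struct type t, i.e. */
#define EXAMPLE_FIELD_SIZEOF(t, f) (sizeof(((t *)0)->f))

/* so the new condition is equivalent to
 *   header_length > sizeof(((struct hpsb_packet *)0)->header)
 * which caps the copied-in header at the size of the packet's header array. */
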
@@ -59,8 +59,10 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stat.h>

@@ -469,19 +471,13 @@ static void sbp2util_write_doorbell(struct work_struct *work)
static int sbp2util_create_command_orb_pool(struct sbp2_lu *lu)
{
    struct sbp2_fwhost_info *hi = lu->hi;
    int i;
    unsigned long flags, orbs;
    struct sbp2_command_info *cmd;
    int i, orbs = sbp2_serialize_io ? 2 : SBP2_MAX_CMDS;

    orbs = sbp2_serialize_io ? 2 : SBP2_MAX_CMDS;

    spin_lock_irqsave(&lu->cmd_orb_lock, flags);
    for (i = 0; i < orbs; i++) {
        cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
        if (!cmd) {
            spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
        cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
        if (!cmd)
            return -ENOMEM;
        }
        cmd->command_orb_dma = dma_map_single(hi->host->device.parent,
                                              &cmd->command_orb,
                                              sizeof(struct sbp2_command_orb),

@@ -489,11 +485,10 @@ static int sbp2util_create_command_orb_pool(struct sbp2_lu *lu)
        cmd->sge_dma = dma_map_single(hi->host->device.parent,
                                      &cmd->scatter_gather_element,
                                      sizeof(cmd->scatter_gather_element),
                                      DMA_BIDIRECTIONAL);
                                      DMA_TO_DEVICE);
        INIT_LIST_HEAD(&cmd->list);
        list_add_tail(&cmd->list, &lu->cmd_orb_completed);
    }
    spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
    return 0;
}

@@ -514,7 +509,7 @@ static void sbp2util_remove_command_orb_pool(struct sbp2_lu *lu)
                         DMA_TO_DEVICE);
        dma_unmap_single(host->device.parent, cmd->sge_dma,
                         sizeof(cmd->scatter_gather_element),
                         DMA_BIDIRECTIONAL);
                         DMA_TO_DEVICE);
        kfree(cmd);
    }
    spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);

@@ -757,6 +752,11 @@ static struct sbp2_lu *sbp2_alloc_device(struct unit_directory *ud)
            SBP2_ERR("failed to register lower 4GB address range");
            goto failed_alloc;
        }
#else
        if (dma_set_mask(hi->host->device.parent, DMA_32BIT_MASK)) {
            SBP2_ERR("failed to set 4GB DMA mask");
            goto failed_alloc;
        }
#endif
    }

@@ -865,11 +865,8 @@ static int sbp2_start_device(struct sbp2_lu *lu)
    if (!lu->login_orb)
        goto alloc_fail;

    if (sbp2util_create_command_orb_pool(lu)) {
        SBP2_ERR("sbp2util_create_command_orb_pool failed!");
        sbp2_remove_device(lu);
        return -ENOMEM;
    }
    if (sbp2util_create_command_orb_pool(lu))
        goto alloc_fail;

    /* Wait a second before trying to log in. Previously logged in
     * initiators need a chance to reconnect. */

@@ -1628,7 +1625,7 @@ static void sbp2_link_orb_command(struct sbp2_lu *lu,
                               DMA_TO_DEVICE);
    dma_sync_single_for_device(hi->host->device.parent, cmd->sge_dma,
                               sizeof(cmd->scatter_gather_element),
                               DMA_BIDIRECTIONAL);
                               DMA_TO_DEVICE);

    /* check to see if there are any previous orbs to use */
    spin_lock_irqsave(&lu->cmd_orb_lock, flags);

@@ -1794,7 +1791,7 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid,
                            DMA_TO_DEVICE);
    dma_sync_single_for_cpu(hi->host->device.parent, cmd->sge_dma,
                            sizeof(cmd->scatter_gather_element),
                            DMA_BIDIRECTIONAL);
                            DMA_TO_DEVICE);
    /* Grab SCSI command pointers and check status. */
    /*
     * FIXME: If the src field in the status is 1, the ORB DMA must

@@ -1926,7 +1923,7 @@ static void sbp2scsi_complete_all_commands(struct sbp2_lu *lu, u32 status)
                                DMA_TO_DEVICE);
        dma_sync_single_for_cpu(hi->host->device.parent, cmd->sge_dma,
                                sizeof(cmd->scatter_gather_element),
                                DMA_BIDIRECTIONAL);
                                DMA_TO_DEVICE);
        sbp2util_mark_command_completed(lu, cmd);
        if (cmd->Current_SCpnt) {
            cmd->Current_SCpnt->result = status << 16;

@@ -2057,7 +2054,7 @@ static int sbp2scsi_abort(struct scsi_cmnd *SCpnt)
            dma_sync_single_for_cpu(hi->host->device.parent,
                                    cmd->sge_dma,
                                    sizeof(cmd->scatter_gather_element),
                                    DMA_BIDIRECTIONAL);
                                    DMA_TO_DEVICE);
            sbp2util_mark_command_completed(lu, cmd);
            if (cmd->Current_SCpnt) {
                cmd->Current_SCpnt->result = DID_ABORT << 16;

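The sbp2 hunks above consistently narrow the scatter-gather page-table mappings from DMA_BIDIRECTIONAL to DMA_TO_DEVICE, since the device only ever reads that table. A generic sketch of the map/sync/unmap pairing with a single, matching direction (device, buffer and helper names below are placeholders, not sbp2's own):

#include <linux/dma-mapping.h>

/* Illustrative only: a CPU-written, device-read table uses DMA_TO_DEVICE in
 * the map, in every sync, and in the final unmap. */
static dma_addr_t example_map_table(struct device *dev, void *table, size_t len)
{
    /* CPU has finished writing; ownership passes to the device */
    return dma_map_single(dev, table, len, DMA_TO_DEVICE);
}

static void example_update_table(struct device *dev, dma_addr_t bus,
                                 void *table, size_t len)
{
    dma_sync_single_for_cpu(dev, bus, len, DMA_TO_DEVICE);
    /* ... CPU rewrites the table through 'table' ... */
    dma_sync_single_for_device(dev, bus, len, DMA_TO_DEVICE);
}

static void example_unmap_table(struct device *dev, dma_addr_t bus, size_t len)
{
    dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
}
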
@@ -250,15 +250,15 @@ enum sbp2_dma_types {
/* Per SCSI command */
struct sbp2_command_info {
    struct list_head list;
    struct sbp2_command_orb command_orb ____cacheline_aligned;
    dma_addr_t command_orb_dma ____cacheline_aligned;
    struct sbp2_command_orb command_orb;
    dma_addr_t command_orb_dma;
    struct scsi_cmnd *Current_SCpnt;
    void (*Current_done)(struct scsi_cmnd *);

    /* Also need s/g structure for each sbp2 command */
    struct sbp2_unrestricted_page_table
        scatter_gather_element[SG_ALL] ____cacheline_aligned;
    dma_addr_t sge_dma ____cacheline_aligned;
        scatter_gather_element[SG_ALL] __attribute__((aligned(8)));
    dma_addr_t sge_dma;
    void *sge_buffer;
    dma_addr_t cmd_dma;
    enum sbp2_dma_types dma_type;