Merge branch 'master' of /home/src/linux-2.6/
commit e8c2cd99a3

@@ -0,0 +1,73 @@
Device-mapper snapshot support
==============================

Device-mapper allows you, without massive data copying:

*) To create snapshots of any block device i.e. mountable, saved states of
   the block device which are also writable without interfering with the
   original content;
*) To create device "forks", i.e. multiple different versions of the
   same data stream.

In both cases, dm copies only the chunks of data that get changed and
uses a separate copy-on-write (COW) block device for storage.

There are two dm targets available: snapshot and snapshot-origin.

*) snapshot-origin <origin>

which will normally have one or more snapshots based on it.
You must create the snapshot-origin device before you can create snapshots.
Reads will be mapped directly to the backing device. For each write, the
original data will be saved in the <COW device> of each snapshot to keep
its visible content unchanged, at least until the <COW device> fills up.

*) snapshot <origin> <COW device> <persistent?> <chunksize>

A snapshot is created of the <origin> block device. Changed chunks of
<chunksize> sectors will be stored on the <COW device>. Writes will
only go to the <COW device>. Reads will come from the <COW device> or
from <origin> for unchanged data. <COW device> will often be
smaller than the origin and if it fills up the snapshot will become
useless and be disabled, returning errors. So it is important to monitor
the amount of free space and expand the <COW device> before it fills up.

<persistent?> is P (Persistent) or N (Not persistent - will not survive
after reboot).

How this is used by LVM2
========================
When you create the first LVM2 snapshot of a volume, four dm devices are used:

1) a device containing the original mapping table of the source volume;
2) a device used as the <COW device>;
3) a "snapshot" device, combining #1 and #2, which is the visible snapshot
   volume;
4) the "original" volume (which uses the device number used by the original
   source volume), whose table is replaced by a "snapshot-origin" mapping
   from device #1.

A fixed naming scheme is used, so with the following commands:

lvcreate -L 1G -n base volumeGroup
lvcreate -L 100M --snapshot -n snap volumeGroup/base

we'll have this situation (with volumes in above order):

# dmsetup table|grep volumeGroup

volumeGroup-base-real: 0 2097152 linear 8:19 384
volumeGroup-snap-cow: 0 204800 linear 8:19 2097536
volumeGroup-snap: 0 2097152 snapshot 254:11 254:12 P 16
volumeGroup-base: 0 2097152 snapshot-origin 254:11

# ls -lL /dev/mapper/volumeGroup-*
brw------- 1 root root 254, 11 29 ago 18:15 /dev/mapper/volumeGroup-base-real
brw------- 1 root root 254, 12 29 ago 18:15 /dev/mapper/volumeGroup-snap-cow
brw------- 1 root root 254, 13 29 ago 18:15 /dev/mapper/volumeGroup-snap
brw------- 1 root root 254, 10 29 ago 18:14 /dev/mapper/volumeGroup-base
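
For illustration only, the same two targets can also be set up by hand with
dmsetup, without LVM2. The device names /dev/sdb1 (origin) and /dev/sdb2
(COW store) below are placeholders, and the chunk size of 16 sectors simply
matches the LVM2 example above. As described earlier, the snapshot-origin
device is created before the snapshot:

# echo "0 `blockdev --getsz /dev/sdb1` snapshot-origin /dev/sdb1" | dmsetup create base
# echo "0 `blockdev --getsz /dev/sdb1` snapshot /dev/sdb1 /dev/sdb2 P 16" | dmsetup create snap

/dev/mapper/base then behaves as the snapshot-origin described above, while
/dev/mapper/snap presents the writable saved state.
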
@@ -51,9 +51,9 @@ or you don't get any checking at all.
Where to get sparse
~~~~~~~~~~~~~~~~~~~

With BK, you can just get it from
With git, you can just get it from

	bk://sparse.bkbits.net/sparse
	rsync://rsync.kernel.org/pub/scm/devel/sparse/sparse.git

and DaveJ has tar-balls at

@@ -1,5 +1,6 @@
Revised: 2000-Dec-05.
Again: 2002-Jul-06
Again: 2005-Sep-19

NOTE:
@@ -18,8 +19,8 @@ called USB Request Block, or URB for short.
  and deliver the data and status back.

- Execution of an URB is inherently an asynchronous operation, i.e. the
  usb_submit_urb(urb) call returns immediately after it has successfully queued
  the requested action.
  usb_submit_urb(urb) call returns immediately after it has successfully
  queued the requested action.

- Transfers for one URB can be canceled with usb_unlink_urb(urb) at any time.
@@ -94,8 +95,9 @@ To free an URB, use

	void usb_free_urb(struct urb *urb)

You may not free an urb that you've submitted, but which hasn't yet been
returned to you in a completion callback.
You may free an urb that you've submitted, but which hasn't yet been
returned to you in a completion callback. It will automatically be
deallocated when it is no longer in use.

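To make that concrete, here is a minimal sketch of a "fire and forget" bulk
write that relies on the behaviour just described: the driver drops its own
reference with usb_free_urb() right after submission and the URB is cleaned
up once it completes. The device pointer, the endpoint number (2) and the
buffer handling are placeholders, not taken from a real driver; buf must
stay valid until the completion handler has run.

	static void sample_bulk_complete(struct urb *urb, struct pt_regs *regs)
	{
		if (urb->status)
			printk(KERN_DEBUG "sample bulk urb failed: %d\n",
			       urb->status);
		/* nothing to free here; our reference was already dropped */
	}

	int sample_send_bulk(struct usb_device *udev, void *buf, int len)
	{
		struct urb *urb;
		int rc;

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb)
			return -ENOMEM;
		/* endpoint 2 is only an example */
		usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, 2),
				  buf, len, sample_bulk_complete, NULL);
		rc = usb_submit_urb(urb, GFP_KERNEL);
		usb_free_urb(urb);	/* drop our reference; see text above */
		return rc;
	}
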
1.4. What has to be filled in?
@@ -145,30 +147,36 @@ to get seamless ISO streaming.

1.6. How to cancel an already running URB?

For an URB which you've submitted, but which hasn't been returned to
your driver by the host controller, call
There are two ways to cancel an URB you've submitted but which hasn't
been returned to your driver yet. For an asynchronous cancel, call

	int usb_unlink_urb(struct urb *urb)

It removes the urb from the internal list and frees all allocated
HW descriptors. The status is changed to reflect unlinking. After
usb_unlink_urb() returns with that status code, you can free the URB
with usb_free_urb().
HW descriptors. The status is changed to reflect unlinking. Note
that the URB will not normally have finished when usb_unlink_urb()
returns; you must still wait for the completion handler to be called.

There is also an asynchronous unlink mode. To use this, set the
the URB_ASYNC_UNLINK flag in urb->transfer flags before calling
usb_unlink_urb(). When using async unlinking, the URB will not
normally be unlinked when usb_unlink_urb() returns. Instead, wait
for the completion handler to be called.
To cancel an URB synchronously, call

	void usb_kill_urb(struct urb *urb)

It does everything usb_unlink_urb does, and in addition it waits
until after the URB has been returned and the completion handler
has finished. It also marks the URB as temporarily unusable, so
that if the completion handler or anyone else tries to resubmit it
they will get a -EPERM error. Thus you can be sure that when
usb_kill_urb() returns, the URB is totally idle.

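As a rough illustration of the synchronous variant, a disconnect() routine
can use usb_kill_urb() to make sure the URB and its completion handler have
finished before the driver's state is freed. The structure and field names
below are invented for this sketch:

	struct sample_dev {
		struct urb	*int_urb;
		/* ... other per-device driver state ... */
	};

	static void sample_disconnect(struct usb_interface *intf)
	{
		struct sample_dev *dev = usb_get_intfdata(intf);

		usb_set_intfdata(intf, NULL);
		usb_kill_urb(dev->int_urb);	/* blocks until the URB is idle */
		usb_free_urb(dev->int_urb);
		kfree(dev);
	}
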
1.7. What about the completion handler?

The handler is of the following type:

	typedef void (*usb_complete_t)(struct urb *);
	typedef void (*usb_complete_t)(struct urb *, struct pt_regs *)

i.e. it gets just the URB that caused the completion call.
I.e., it gets the URB that caused the completion call, plus the
register values at the time of the corresponding interrupt (if any).
In the completion handler, you should have a look at urb->status to
detect any USB errors. Since the context parameter is included in the URB,
you can pass information to the completion handler.

@@ -176,17 +184,11 @@ you can pass information to the completion handler.
Note that even when an error (or unlink) is reported, data may have been
transferred. That's because USB transfers are packetized; it might take
sixteen packets to transfer your 1KByte buffer, and ten of them might
have transferred successfully before the completion is called.
have transferred successfully before the completion was called.


NOTE: ***** WARNING *****
Don't use urb->dev field in your completion handler; it's cleared
as part of giving urbs back to drivers. (Addressing an issue with
ownership of periodic URBs, which was otherwise ambiguous.) Instead,
use urb->context to hold all the data your driver needs.

NOTE: ***** WARNING *****
Also, NEVER SLEEP IN A COMPLETION HANDLER. These are normally called
NEVER SLEEP IN A COMPLETION HANDLER. These are normally called
during hardware interrupt processing. If you can, defer substantial
work to a tasklet (bottom half) to keep system latencies low. You'll
probably need to use spinlocks to protect data structures you manipulate

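Putting these notes together, a completion handler might look roughly like
the sketch below: urb->context carries the driver's own structure,
urb->status is checked for errors or unlinking, and any substantial work is
pushed to a tasklet (initialized elsewhere with tasklet_init()) rather than
done in interrupt context. All names are illustrative only.

	struct sample_dev {
		struct urb		*urb;
		struct tasklet_struct	process_tasklet;
		/* ... buffers, locks, etc. ... */
	};

	static void sample_complete(struct urb *urb, struct pt_regs *regs)
	{
		struct sample_dev *dev = urb->context;

		if (urb->status) {
			/* error, unlink or shutdown; do not touch urb->dev */
			printk(KERN_DEBUG "urb finished with status %d\n",
			       urb->status);
			return;
		}
		/* never sleep here; defer the real work to a tasklet */
		tasklet_schedule(&dev->process_tasklet);
	}
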
@@ -229,24 +231,10 @@ ISO data with some other event stream.
Interrupt transfers, like isochronous transfers, are periodic, and happen
in intervals that are powers of two (1, 2, 4 etc) units. Units are frames
for full and low speed devices, and microframes for high speed ones.

Currently, after you submit one interrupt URB, that urb is owned by the
host controller driver until you cancel it with usb_unlink_urb(). You
may unlink interrupt urbs in their completion handlers, if you need to.

After a transfer completion is called, the URB is automagically resubmitted.
THIS BEHAVIOR IS EXPECTED TO BE REMOVED!!

Interrupt transfers may only send (or receive) the "maxpacket" value for
the given interrupt endpoint; if you need more data, you will need to
copy that data out of (or into) another buffer. Similarly, you can't
queue interrupt transfers.
THESE RESTRICTIONS ARE EXPECTED TO BE REMOVED!!

Note that this automagic resubmission model does make it awkward to use
interrupt OUT transfers. The portable solution involves unlinking those
OUT urbs after the data is transferred, and perhaps submitting a final
URB for a short packet.

The usb_submit_urb() call modifies urb->interval to the implemented interval
value that is less than or equal to the requested interval value.

In Linux 2.6, unlike earlier versions, interrupt URBs are not automagically
restarted when they complete. They end when the completion handler is
called, just like other URBs. If you want an interrupt URB to be restarted,
your completion handler must resubmit it.
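
For instance, an interrupt-in URB that should keep running under these 2.6
rules is simply resubmitted from its completion handler. A sketch, with
minimal error handling and invented names, follows:

	static void sample_int_complete(struct urb *urb, struct pt_regs *regs)
	{
		int rc;

		if (urb->status)	/* unlinked, killed or errored: stop */
			return;

		/* ... process urb->transfer_buffer / urb->actual_length ... */

		rc = usb_submit_urb(urb, GFP_ATOMIC);	/* interrupt context */
		if (rc)
			printk(KERN_ERR "resubmit failed: %d\n", rc);
	}
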
MAINTAINERS

@@ -1063,8 +1063,6 @@ M: wli@holomorphy.com
S: Maintained

I2C SUBSYSTEM
P: Greg Kroah-Hartman
M: greg@kroah.com
P: Jean Delvare
M: khali@linux-fr.org
L: lm-sensors@lm-sensors.org

@@ -1404,6 +1402,18 @@ L: linux-kernel@vger.kernel.org
L: fastboot@osdl.org
S: Maintained

KPROBES
P: Prasanna S Panchamukhi
M: prasanna@in.ibm.com
P: Ananth N Mavinakayanahalli
M: ananth@in.ibm.com
P: Anil S Keshavamurthy
M: anil.s.keshavamurthy@intel.com
P: David S. Miller
M: davem@davemloft.net
L: linux-kernel@vger.kernel.org
S: Maintained

LANMEDIA WAN CARD DRIVER
P: Andrew Stanley-Jones
M: asj@lanmedia.com

README

@@ -151,7 +151,7 @@ CONFIGURING the kernel:
   your existing ./.config file.
	"make silentoldconfig"
	   Like above, but avoids cluttering the screen
	   with question already answered.
	   with questions already answered.

   NOTES on "make config":
   - having unnecessary drivers will make the kernel bigger, and can

@@ -199,9 +199,9 @@ COMPILING the kernel:
   are installing a new kernel with the same version number as your
   working kernel, make a backup of your modules directory before you
   do a "make modules_install".
   In alternative, before compiling, edit your Makefile and change the
   "EXTRAVERSION" line - its content is appended to the regular kernel
   version.
   Alternatively, before compiling, use the kernel config option
   "LOCALVERSION" to append a unique suffix to the regular kernel version.
   LOCALVERSION can be set in the "General Setup" menu.

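   For example (the "-test1" suffix here is only an illustration), setting

	CONFIG_LOCALVERSION="-test1"

   in your .config, or entering "-test1" under the "General Setup" menu of
   "make menuconfig", makes a kernel that would otherwise call itself
   2.6.x report 2.6.x-test1 instead, so its modules are installed into a
   separate directory.
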
- In order to boot your new kernel, you'll need to copy the kernel
image (e.g. .../linux/arch/i386/boot/bzImage after compilation)

@@ -127,6 +127,10 @@ common_shutdown_1(void *generic_ptr)
	/* If booted from SRM, reset some of the original environment. */
	if (alpha_using_srm) {
#ifdef CONFIG_DUMMY_CONSOLE
		/* If we've gotten here after SysRq-b, leave interrupt
		   context before taking over the console. */
		if (in_interrupt())
			irq_exit();
		/* This has the effect of resetting the VGA video origin. */
		take_over_console(&dummy_con, 0, MAX_NR_CONSOLES-1, 1);
#endif

@@ -537,7 +537,7 @@ ENTRY(__switch_to)
#ifdef CONFIG_CPU_MPCORE
	clrex
#else
	strex	r3, r4, [ip]		@ Clear exclusive monitor
	strex	r5, r4, [ip]		@ Clear exclusive monitor
#endif
#endif
#if defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_IWMMXT)

@@ -7,7 +7,7 @@
 * Copy data from IO memory space to "real" memory space.
 * This needs to be optimized.
 */
void _memcpy_fromio(void *to, void __iomem *from, size_t count)
void _memcpy_fromio(void *to, const volatile void __iomem *from, size_t count)
{
	unsigned char *t = to;
	while (count) {

@@ -22,7 +22,7 @@ void _memcpy_fromio(void *to, void __iomem *from, size_t count)
 * Copy data from "real" memory space to IO memory space.
 * This needs to be optimized.
 */
void _memcpy_toio(void __iomem *to, const void *from, size_t count)
void _memcpy_toio(volatile void __iomem *to, const void *from, size_t count)
{
	const unsigned char *f = from;
	while (count) {

@@ -37,7 +37,7 @@ void _memcpy_toio(void __iomem *to, const void *from, size_t count)
 * "memset" on IO memory space.
 * This needs to be optimized.
 */
void _memset_io(void __iomem *dst, int c, size_t count)
void _memset_io(volatile void __iomem *dst, int c, size_t count)
{
	while (count) {
		count--;

@ -233,6 +233,23 @@ simscsi_readwrite10 (struct scsi_cmnd *sc, int mode)
|
|||
simscsi_readwrite(sc, mode, offset, ((sc->cmnd[7] << 8) | sc->cmnd[8])*512);
|
||||
}
|
||||
|
||||
static void simscsi_fillresult(struct scsi_cmnd *sc, char *buf, unsigned len)
|
||||
{
|
||||
|
||||
int scatterlen = sc->use_sg;
|
||||
struct scatterlist *slp;
|
||||
|
||||
if (scatterlen == 0)
|
||||
memcpy(sc->request_buffer, buf, len);
|
||||
else for (slp = (struct scatterlist *)sc->buffer; scatterlen-- > 0 && len > 0; slp++) {
|
||||
unsigned thislen = min(len, slp->length);
|
||||
|
||||
memcpy(page_address(slp->page) + slp->offset, buf, thislen);
|
||||
slp++;
|
||||
len -= thislen;
|
||||
}
|
||||
}
|
||||
|
||||
static int
|
||||
simscsi_queuecommand (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
|
||||
{
|
||||
|
@ -240,6 +257,7 @@ simscsi_queuecommand (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
|
|||
char fname[MAX_ROOT_LEN+16];
|
||||
size_t disk_size;
|
||||
char *buf;
|
||||
char localbuf[36];
|
||||
#if DEBUG_SIMSCSI
|
||||
register long sp asm ("sp");
|
||||
|
||||
|
@ -263,7 +281,7 @@ simscsi_queuecommand (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
|
|||
/* disk doesn't exist... */
|
||||
break;
|
||||
}
|
||||
buf = sc->request_buffer;
|
||||
buf = localbuf;
|
||||
buf[0] = 0; /* magnetic disk */
|
||||
buf[1] = 0; /* not a removable medium */
|
||||
buf[2] = 2; /* SCSI-2 compliant device */
|
||||
|
@ -273,6 +291,7 @@ simscsi_queuecommand (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
|
|||
buf[6] = 0; /* reserved */
|
||||
buf[7] = 0; /* various flags */
|
||||
memcpy(buf + 8, "HP SIMULATED DISK 0.00", 28);
|
||||
simscsi_fillresult(sc, buf, 36);
|
||||
sc->result = GOOD;
|
||||
break;
|
||||
|
||||
|
@ -304,16 +323,13 @@ simscsi_queuecommand (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
|
|||
simscsi_readwrite10(sc, SSC_WRITE);
|
||||
break;
|
||||
|
||||
|
||||
case READ_CAPACITY:
|
||||
if (desc[target_id] < 0 || sc->request_bufflen < 8) {
|
||||
break;
|
||||
}
|
||||
buf = sc->request_buffer;
|
||||
|
||||
buf = localbuf;
|
||||
disk_size = simscsi_get_disk_size(desc[target_id]);
|
||||
|
||||
/* pretend to be a 1GB disk (partition table contains real stuff): */
|
||||
buf[0] = (disk_size >> 24) & 0xff;
|
||||
buf[1] = (disk_size >> 16) & 0xff;
|
||||
buf[2] = (disk_size >> 8) & 0xff;
|
||||
|
@ -323,13 +339,14 @@ simscsi_queuecommand (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
|
|||
buf[5] = 0;
|
||||
buf[6] = 2;
|
||||
buf[7] = 0;
|
||||
simscsi_fillresult(sc, buf, 8);
|
||||
sc->result = GOOD;
|
||||
break;
|
||||
|
||||
case MODE_SENSE:
|
||||
case MODE_SENSE_10:
|
||||
/* sd.c uses this to determine whether disk does write-caching. */
|
||||
memset(sc->request_buffer, 0, 128);
|
||||
simscsi_fillresult(sc, (char *)empty_zero_page, sc->request_bufflen);
|
||||
sc->result = GOOD;
|
||||
break;
|
||||
|
||||
|
|
|
@ -489,24 +489,27 @@ ia64_state_save:
|
|||
;;
|
||||
st8 [temp1]=r17,16 // pal_min_state
|
||||
st8 [temp2]=r6,16 // prev_IA64_KR_CURRENT
|
||||
mov r6=IA64_KR(CURRENT_STACK)
|
||||
;;
|
||||
st8 [temp1]=r6,16 // prev_IA64_KR_CURRENT_STACK
|
||||
st8 [temp2]=r0,16 // prev_task, starts off as NULL
|
||||
mov r6=cr.ifa
|
||||
;;
|
||||
st8 [temp1]=r0,16 // prev_task, starts off as NULL
|
||||
st8 [temp2]=r12,16 // cr.isr
|
||||
st8 [temp1]=r12,16 // cr.isr
|
||||
st8 [temp2]=r6,16 // cr.ifa
|
||||
mov r12=cr.itir
|
||||
;;
|
||||
st8 [temp1]=r6,16 // cr.ifa
|
||||
st8 [temp2]=r12,16 // cr.itir
|
||||
st8 [temp1]=r12,16 // cr.itir
|
||||
st8 [temp2]=r11,16 // cr.iipa
|
||||
mov r12=cr.iim
|
||||
;;
|
||||
st8 [temp1]=r11,16 // cr.iipa
|
||||
st8 [temp2]=r12,16 // cr.iim
|
||||
mov r6=cr.iha
|
||||
st8 [temp1]=r12,16 // cr.iim
|
||||
(p1) mov r12=IA64_MCA_COLD_BOOT
|
||||
(p2) mov r12=IA64_INIT_WARM_BOOT
|
||||
mov r6=cr.iha
|
||||
;;
|
||||
st8 [temp1]=r6,16 // cr.iha
|
||||
st8 [temp2]=r12 // os_status, default is cold boot
|
||||
st8 [temp2]=r6,16 // cr.iha
|
||||
st8 [temp1]=r12 // os_status, default is cold boot
|
||||
mov r6=IA64_MCA_SAME_CONTEXT
|
||||
;;
|
||||
st8 [temp1]=r6 // context, default is same context
|
||||
|
@ -823,9 +826,12 @@ ia64_state_restore:
|
|||
ld8 r12=[temp1],16 // sal_ra
|
||||
ld8 r9=[temp2],16 // sal_gp
|
||||
;;
|
||||
ld8 r22=[temp1],24 // pal_min_state, virtual. skip prev_task
|
||||
ld8 r22=[temp1],16 // pal_min_state, virtual
|
||||
ld8 r21=[temp2],16 // prev_IA64_KR_CURRENT
|
||||
;;
|
||||
ld8 r16=[temp1],16 // prev_IA64_KR_CURRENT_STACK
|
||||
ld8 r20=[temp2],16 // prev_task
|
||||
;;
|
||||
ld8 temp3=[temp1],16 // cr.isr
|
||||
ld8 temp4=[temp2],16 // cr.ifa
|
||||
;;
|
||||
|
@ -846,6 +852,45 @@ ia64_state_restore:
|
|||
ld8 r8=[temp1] // os_status
|
||||
ld8 r10=[temp2] // context
|
||||
|
||||
/* Wire IA64_TR_CURRENT_STACK to the stack that we are resuming to. To
|
||||
* avoid any dependencies on the algorithm in ia64_switch_to(), just
|
||||
* purge any existing CURRENT_STACK mapping and insert the new one.
|
||||
*
|
||||
* r16 contains prev_IA64_KR_CURRENT_STACK, r21 contains
|
||||
* prev_IA64_KR_CURRENT, these values may have been changed by the C
|
||||
* code. Do not use r8, r9, r10, r22, they contain values ready for
|
||||
* the return to SAL.
|
||||
*/
|
||||
|
||||
mov r15=IA64_KR(CURRENT_STACK) // physical granule mapped by IA64_TR_CURRENT_STACK
|
||||
;;
|
||||
shl r15=r15,IA64_GRANULE_SHIFT
|
||||
;;
|
||||
dep r15=-1,r15,61,3 // virtual granule
|
||||
mov r18=IA64_GRANULE_SHIFT<<2 // for cr.itir.ps
|
||||
;;
|
||||
ptr.d r15,r18
|
||||
;;
|
||||
srlz.d
|
||||
|
||||
extr.u r19=r21,61,3 // r21 = prev_IA64_KR_CURRENT
|
||||
shl r20=r16,IA64_GRANULE_SHIFT // r16 = prev_IA64_KR_CURRENT_STACK
|
||||
movl r21=PAGE_KERNEL // page properties
|
||||
;;
|
||||
mov IA64_KR(CURRENT_STACK)=r16
|
||||
cmp.ne p6,p0=RGN_KERNEL,r19 // new stack is in the kernel region?
|
||||
or r21=r20,r21 // construct PA | page properties
|
||||
(p6) br.spnt 1f // the dreaded cpu 0 idle task in region 5:(
|
||||
;;
|
||||
mov cr.itir=r18
|
||||
mov cr.ifa=r21
|
||||
mov r20=IA64_TR_CURRENT_STACK
|
||||
;;
|
||||
itr.d dtr[r20]=r21
|
||||
;;
|
||||
srlz.d
|
||||
1:
|
||||
|
||||
br.sptk b0
|
||||
|
||||
//EndStub//////////////////////////////////////////////////////////////////////
|
||||
|
@ -982,6 +1027,7 @@ ia64_set_kernel_registers:
|
|||
add temp4=temp4, temp1 // &struct ia64_sal_os_state.os_gp
|
||||
add r12=temp1, temp3 // kernel stack pointer on MCA/INIT stack
|
||||
add r13=temp1, r3 // set current to start of MCA/INIT stack
|
||||
add r20=temp1, r3 // physical start of MCA/INIT stack
|
||||
;;
|
||||
ld8 r1=[temp4] // OS GP from SAL OS state
|
||||
;;
|
||||
|
@ -991,7 +1037,35 @@ ia64_set_kernel_registers:
|
|||
;;
|
||||
mov IA64_KR(CURRENT)=r13
|
||||
|
||||
// FIXME: do I need to wire IA64_KR_CURRENT_STACK and IA64_TR_CURRENT_STACK?
|
||||
/* Wire IA64_TR_CURRENT_STACK to the MCA/INIT handler stack. To avoid
|
||||
* any dependencies on the algorithm in ia64_switch_to(), just purge
|
||||
* any existing CURRENT_STACK mapping and insert the new one.
|
||||
*/
|
||||
|
||||
mov r16=IA64_KR(CURRENT_STACK) // physical granule mapped by IA64_TR_CURRENT_STACK
|
||||
;;
|
||||
shl r16=r16,IA64_GRANULE_SHIFT
|
||||
;;
|
||||
dep r16=-1,r16,61,3 // virtual granule
|
||||
mov r18=IA64_GRANULE_SHIFT<<2 // for cr.itir.ps
|
||||
;;
|
||||
ptr.d r16,r18
|
||||
;;
|
||||
srlz.d
|
||||
|
||||
shr.u r16=r20,IA64_GRANULE_SHIFT // r20 = physical start of MCA/INIT stack
|
||||
movl r21=PAGE_KERNEL // page properties
|
||||
;;
|
||||
mov IA64_KR(CURRENT_STACK)=r16
|
||||
or r21=r20,r21 // construct PA | page properties
|
||||
;;
|
||||
mov cr.itir=r18
|
||||
mov cr.ifa=r13
|
||||
mov r20=IA64_TR_CURRENT_STACK
|
||||
;;
|
||||
itr.d dtr[r20]=r21
|
||||
;;
|
||||
srlz.d
|
||||
|
||||
br.sptk b0
|
||||
|
||||
|
|
|
@ -56,8 +56,9 @@ static struct page *page_isolate[MAX_PAGE_ISOLATE];
|
|||
static int num_page_isolate = 0;
|
||||
|
||||
typedef enum {
|
||||
ISOLATE_NG = 0,
|
||||
ISOLATE_OK = 1
|
||||
ISOLATE_NG,
|
||||
ISOLATE_OK,
|
||||
ISOLATE_NONE
|
||||
} isolate_status_t;
|
||||
|
||||
/*
|
||||
|
@ -74,7 +75,7 @@ static struct {
|
|||
* @paddr: poisoned memory location
|
||||
*
|
||||
* Return value:
|
||||
* ISOLATE_OK / ISOLATE_NG
|
||||
* one of isolate_status_t, ISOLATE_OK/NG/NONE.
|
||||
*/
|
||||
|
||||
static isolate_status_t
|
||||
|
@ -85,7 +86,10 @@ mca_page_isolate(unsigned long paddr)
|
|||
|
||||
/* whether physical address is valid or not */
|
||||
if (!ia64_phys_addr_valid(paddr))
|
||||
return ISOLATE_NG;
|
||||
return ISOLATE_NONE;
|
||||
|
||||
if (!pfn_valid(paddr))
|
||||
return ISOLATE_NONE;
|
||||
|
||||
/* convert physical address to physical page number */
|
||||
p = pfn_to_page(paddr>>PAGE_SHIFT);
|
||||
|
@ -122,10 +126,15 @@ mca_handler_bh(unsigned long paddr)
|
|||
current->pid, current->comm);
|
||||
|
||||
spin_lock(&mca_bh_lock);
|
||||
if (mca_page_isolate(paddr) == ISOLATE_OK) {
|
||||
switch (mca_page_isolate(paddr)) {
|
||||
case ISOLATE_OK:
|
||||
printk(KERN_DEBUG "Page isolation: ( %lx ) success.\n", paddr);
|
||||
} else {
|
||||
break;
|
||||
case ISOLATE_NG:
|
||||
printk(KERN_DEBUG "Page isolation: ( %lx ) failure.\n", paddr);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
spin_unlock(&mca_bh_lock);
|
||||
|
||||
|
|
|
@ -15,9 +15,8 @@ extra-y += vmlinux.lds
|
|||
obj-y := entry.o traps.o irq.o idle.o time.o misc.o \
|
||||
process.o signal.o ptrace.o align.o \
|
||||
semaphore.o syscalls.o setup.o \
|
||||
cputable.o ppc_htab.o
|
||||
cputable.o ppc_htab.o perfmon.o
|
||||
obj-$(CONFIG_6xx) += l2cr.o cpu_setup_6xx.o
|
||||
obj-$(CONFIG_E500) += perfmon.o
|
||||
obj-$(CONFIG_SOFTWARE_SUSPEND) += swsusp.o
|
||||
obj-$(CONFIG_POWER4) += cpu_setup_power4.o
|
||||
obj-$(CONFIG_MODULES) += module.o ppc_ksyms.o
|
||||
|
|
|
@ -45,7 +45,7 @@ static void dummy_perf(struct pt_regs *regs)
|
|||
mtpmr(PMRN_PMGC0, pmgc0);
|
||||
}
|
||||
|
||||
#else
|
||||
#elif CONFIG_6xx
|
||||
/* Ensure exceptions are disabled */
|
||||
|
||||
static void dummy_perf(struct pt_regs *regs)
|
||||
|
@ -55,6 +55,10 @@ static void dummy_perf(struct pt_regs *regs)
|
|||
mmcr0 &= ~MMCR0_PMXE;
|
||||
mtspr(SPRN_MMCR0, mmcr0);
|
||||
}
|
||||
#else
|
||||
static void dummy_perf(struct pt_regs *regs)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
void (*perf_irq)(struct pt_regs *) = dummy_perf;
|
||||
|
|
|
@ -719,7 +719,8 @@ pmac_declare_of_platform_devices(void)
|
|||
if (np) {
|
||||
for (np = np->child; np != NULL; np = np->sibling)
|
||||
if (strncmp(np->name, "i2c", 3) == 0) {
|
||||
of_platform_device_create(np, "uni-n-i2c");
|
||||
of_platform_device_create(np, "uni-n-i2c",
|
||||
NULL);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
@ -727,17 +728,18 @@ pmac_declare_of_platform_devices(void)
|
|||
if (np) {
|
||||
for (np = np->child; np != NULL; np = np->sibling)
|
||||
if (strncmp(np->name, "i2c", 3) == 0) {
|
||||
of_platform_device_create(np, "u3-i2c");
|
||||
of_platform_device_create(np, "u3-i2c",
|
||||
NULL);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
np = find_devices("valkyrie");
|
||||
if (np)
|
||||
of_platform_device_create(np, "valkyrie");
|
||||
of_platform_device_create(np, "valkyrie", NULL);
|
||||
np = find_devices("platinum");
|
||||
if (np)
|
||||
of_platform_device_create(np, "platinum");
|
||||
of_platform_device_create(np, "platinum", NULL);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -234,7 +234,9 @@ void of_device_unregister(struct of_device *ofdev)
|
|||
device_unregister(&ofdev->dev);
|
||||
}
|
||||
|
||||
struct of_device* of_platform_device_create(struct device_node *np, const char *bus_id)
|
||||
struct of_device* of_platform_device_create(struct device_node *np,
|
||||
const char *bus_id,
|
||||
struct device *parent)
|
||||
{
|
||||
struct of_device *dev;
|
||||
u32 *reg;
|
||||
|
@ -247,7 +249,7 @@ struct of_device* of_platform_device_create(struct device_node *np, const char *
|
|||
dev->node = of_node_get(np);
|
||||
dev->dma_mask = 0xffffffffUL;
|
||||
dev->dev.dma_mask = &dev->dma_mask;
|
||||
dev->dev.parent = NULL;
|
||||
dev->dev.parent = parent;
|
||||
dev->dev.bus = &of_platform_bus_type;
|
||||
dev->dev.release = of_release_dev;
|
||||
|
||||
|
|
|
@ -184,8 +184,8 @@ mpc85xx_setup_pci1(struct pci_controller *hose)
|
|||
pci->powar1 = 0x80044000 |
|
||||
(__ilog2(MPC85XX_PCI1_UPPER_MEM - MPC85XX_PCI1_LOWER_MEM + 1) - 1);
|
||||
|
||||
/* Setup outboud IO windows @ MPC85XX_PCI1_IO_BASE */
|
||||
pci->potar2 = 0x00000000;
|
||||
/* Setup outbound IO windows @ MPC85XX_PCI1_IO_BASE */
|
||||
pci->potar2 = (MPC85XX_PCI1_LOWER_IO >> 12) & 0x000fffff;
|
||||
pci->potear2 = 0x00000000;
|
||||
pci->powbar2 = (MPC85XX_PCI1_IO_BASE >> 12) & 0x000fffff;
|
||||
/* Enable, IO R/W */
|
||||
|
@ -235,8 +235,8 @@ mpc85xx_setup_pci2(struct pci_controller *hose)
|
|||
pci->powar1 = 0x80044000 |
|
||||
(__ilog2(MPC85XX_PCI2_UPPER_MEM - MPC85XX_PCI2_LOWER_MEM + 1) - 1);
|
||||
|
||||
/* Setup outboud IO windows @ MPC85XX_PCI2_IO_BASE */
|
||||
pci->potar2 = 0x00000000;
|
||||
/* Setup outbound IO windows @ MPC85XX_PCI2_IO_BASE */
|
||||
pci->potar2 = (MPC85XX_PCI2_LOWER_IO >> 12) & 0x000fffff;;
|
||||
pci->potear2 = 0x00000000;
|
||||
pci->powbar2 = (MPC85XX_PCI2_IO_BASE >> 12) & 0x000fffff;
|
||||
/* Enable, IO R/W */
|
||||
|
|
|
@ -233,7 +233,9 @@ void of_device_unregister(struct of_device *ofdev)
|
|||
device_unregister(&ofdev->dev);
|
||||
}
|
||||
|
||||
struct of_device* of_platform_device_create(struct device_node *np, const char *bus_id)
|
||||
struct of_device* of_platform_device_create(struct device_node *np,
|
||||
const char *bus_id,
|
||||
struct device *parent)
|
||||
{
|
||||
struct of_device *dev;
|
||||
|
||||
|
@ -245,7 +247,7 @@ struct of_device* of_platform_device_create(struct device_node *np, const char *
|
|||
dev->node = np;
|
||||
dev->dma_mask = 0xffffffffUL;
|
||||
dev->dev.dma_mask = &dev->dma_mask;
|
||||
dev->dev.parent = NULL;
|
||||
dev->dev.parent = parent;
|
||||
dev->dev.bus = &of_platform_bus_type;
|
||||
dev->dev.release = of_release_dev;
|
||||
|
||||
|
@ -259,6 +261,7 @@ struct of_device* of_platform_device_create(struct device_node *np, const char *
|
|||
return dev;
|
||||
}
|
||||
|
||||
|
||||
EXPORT_SYMBOL(of_match_device);
|
||||
EXPORT_SYMBOL(of_platform_bus_type);
|
||||
EXPORT_SYMBOL(of_register_driver);
|
||||
|
|
|
@ -364,7 +364,8 @@ static void iommu_bus_setup_pSeries(struct pci_bus *bus)
|
|||
|
||||
while (pci->phb->dma_window_size * children > 0x80000000ul)
|
||||
pci->phb->dma_window_size >>= 1;
|
||||
DBG("No ISA/IDE, window size is %x\n", pci->phb->dma_window_size);
|
||||
DBG("No ISA/IDE, window size is 0x%lx\n",
|
||||
pci->phb->dma_window_size);
|
||||
pci->phb->dma_window_base_cur = 0;
|
||||
|
||||
return;
|
||||
|
@ -388,7 +389,7 @@ static void iommu_bus_setup_pSeries(struct pci_bus *bus)
|
|||
while (pci->phb->dma_window_size * children > 0x70000000ul)
|
||||
pci->phb->dma_window_size >>= 1;
|
||||
|
||||
DBG("ISA/IDE, window size is %x\n", pci->phb->dma_window_size);
|
||||
DBG("ISA/IDE, window size is 0x%lx\n", pci->phb->dma_window_size);
|
||||
|
||||
}
|
||||
|
||||
|
@ -442,7 +443,7 @@ static void iommu_dev_setup_pSeries(struct pci_dev *dev)
|
|||
struct device_node *dn, *mydn;
|
||||
struct iommu_table *tbl;
|
||||
|
||||
DBG("iommu_dev_setup_pSeries, dev %p (%s)\n", dev, dev->pretty_name);
|
||||
DBG("iommu_dev_setup_pSeries, dev %p (%s)\n", dev, pci_name(dev));
|
||||
|
||||
mydn = dn = pci_device_to_OF_node(dev);
|
||||
|
||||
|
@ -469,7 +470,7 @@ static void iommu_dev_setup_pSeries(struct pci_dev *dev)
|
|||
if (dn && dn->data) {
|
||||
PCI_DN(mydn)->iommu_table = PCI_DN(dn)->iommu_table;
|
||||
} else {
|
||||
DBG("iommu_dev_setup_pSeries, dev %p (%s) has no iommu table\n", dev, dev->pretty_name);
|
||||
DBG("iommu_dev_setup_pSeries, dev %p (%s) has no iommu table\n", dev, pci_name(dev));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -503,7 +504,7 @@ static void iommu_dev_setup_pSeriesLP(struct pci_dev *dev)
|
|||
int *dma_window = NULL;
|
||||
struct pci_dn *pci;
|
||||
|
||||
DBG("iommu_dev_setup_pSeriesLP, dev %p (%s)\n", dev, dev->pretty_name);
|
||||
DBG("iommu_dev_setup_pSeriesLP, dev %p (%s)\n", dev, pci_name(dev));
|
||||
|
||||
/* dev setup for LPAR is a little tricky, since the device tree might
|
||||
* contain the dma-window properties per-device and not neccesarily
|
||||
|
@ -525,9 +526,8 @@ static void iommu_dev_setup_pSeriesLP(struct pci_dev *dev)
|
|||
* slots on POWER4 machines.
|
||||
*/
|
||||
if (dma_window == NULL || pdn->parent == NULL) {
|
||||
/* Fall back to regular (non-LPAR) dev setup */
|
||||
DBG("No dma window for device, falling back to regular setup\n");
|
||||
iommu_dev_setup_pSeries(dev);
|
||||
DBG("No dma window for device, linking to parent\n");
|
||||
PCI_DN(dn)->iommu_table = PCI_DN(pdn)->iommu_table;
|
||||
return;
|
||||
} else {
|
||||
DBG("Found DMA window, allocating table\n");
|
||||
|
|
|
@ -434,15 +434,23 @@ static int pmac_check_legacy_ioport(unsigned int baseport)
|
|||
|
||||
static int __init pmac_declare_of_platform_devices(void)
|
||||
{
|
||||
struct device_node *np;
|
||||
struct device_node *np, *npp;
|
||||
|
||||
np = find_devices("u3");
|
||||
if (np) {
|
||||
for (np = np->child; np != NULL; np = np->sibling)
|
||||
npp = of_find_node_by_name(NULL, "u3");
|
||||
if (npp) {
|
||||
for (np = NULL; (np = of_get_next_child(npp, np)) != NULL;) {
|
||||
if (strncmp(np->name, "i2c", 3) == 0) {
|
||||
of_platform_device_create(np, "u3-i2c");
|
||||
of_platform_device_create(np, "u3-i2c", NULL);
|
||||
of_node_put(np);
|
||||
break;
|
||||
}
|
||||
}
|
||||
of_node_put(npp);
|
||||
}
|
||||
npp = of_find_node_by_type(NULL, "smu");
|
||||
if (npp) {
|
||||
of_platform_device_create(npp, "smu", NULL);
|
||||
of_node_put(npp);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
|
|
@ -84,7 +84,7 @@ void __pmac pmac_get_rtc_time(struct rtc_time *tm)
|
|||
|
||||
#ifdef CONFIG_PMAC_SMU
|
||||
case SYS_CTRLER_SMU:
|
||||
smu_get_rtc_time(tm);
|
||||
smu_get_rtc_time(tm, 1);
|
||||
break;
|
||||
#endif /* CONFIG_PMAC_SMU */
|
||||
default:
|
||||
|
@ -128,7 +128,7 @@ int __pmac pmac_set_rtc_time(struct rtc_time *tm)
|
|||
|
||||
#ifdef CONFIG_PMAC_SMU
|
||||
case SYS_CTRLER_SMU:
|
||||
return smu_set_rtc_time(tm);
|
||||
return smu_set_rtc_time(tm, 1);
|
||||
#endif /* CONFIG_PMAC_SMU */
|
||||
default:
|
||||
return -ENODEV;
|
||||
|
|
|
@ -219,6 +219,7 @@ int sys_ptrace(long request, long pid, long addr, long data)
|
|||
|
||||
case PTRACE_SET_DEBUGREG:
|
||||
ret = ptrace_set_debugreg(child, addr, data);
|
||||
break;
|
||||
|
||||
case PTRACE_DETACH:
|
||||
ret = ptrace_detach(child, data);
|
||||
|
|
|
@ -343,9 +343,7 @@ static void native_flush_hash_range(unsigned long context,
|
|||
hpte_t *hptep;
|
||||
unsigned long hpte_v;
|
||||
struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
|
||||
|
||||
/* XXX fix for large ptes */
|
||||
unsigned long large = 0;
|
||||
unsigned long large;
|
||||
|
||||
local_irq_save(flags);
|
||||
|
||||
|
@ -358,6 +356,7 @@ static void native_flush_hash_range(unsigned long context,
|
|||
|
||||
va = (vsid << 28) | (batch->addr[i] & 0x0fffffff);
|
||||
batch->vaddr[j] = va;
|
||||
large = pte_huge(batch->pte[i]);
|
||||
if (large)
|
||||
vpn = va >> HPAGE_SHIFT;
|
||||
else
|
||||
|
|
|
@ -710,10 +710,13 @@ repeat:
|
|||
hpte_group = ((~hash & htab_hash_mask) *
|
||||
HPTES_PER_GROUP) & ~0x7UL;
|
||||
slot = ppc_md.hpte_insert(hpte_group, va, prpn,
|
||||
HPTE_V_LARGE, rflags);
|
||||
HPTE_V_LARGE |
|
||||
HPTE_V_SECONDARY,
|
||||
rflags);
|
||||
if (slot == -1) {
|
||||
if (mftb() & 0x1)
|
||||
hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
|
||||
hpte_group = ((hash & htab_hash_mask) *
|
||||
HPTES_PER_GROUP)&~0x7UL;
|
||||
|
||||
ppc_md.hpte_remove(hpte_group);
|
||||
goto repeat;
|
||||
|
|
|
@ -19,18 +19,44 @@
|
|||
#include "line.h"
|
||||
#include "os.h"
|
||||
|
||||
#ifdef CONFIG_NOCONFIG_CHAN
|
||||
/* XXX: could well be moved to somewhere else, if needed. */
|
||||
static int my_printf(const char * fmt, ...)
|
||||
__attribute__ ((format (printf, 1, 2)));
|
||||
|
||||
/* The printk's here are wrong because we are complaining that there is no
|
||||
* output device, but printk is printing to that output device. The user will
|
||||
* never see the error. printf would be better, except it can't run on a
|
||||
* kernel stack because it will overflow it.
|
||||
* Use printk for now since that will avoid crashing.
|
||||
*/
|
||||
static int my_printf(const char * fmt, ...)
|
||||
{
|
||||
/* Yes, can be called on atomic context.*/
|
||||
char *buf = kmalloc(4096, GFP_ATOMIC);
|
||||
va_list args;
|
||||
int r;
|
||||
|
||||
if (!buf) {
|
||||
/* We print directly fmt.
|
||||
* Yes, yes, yes, feel free to complain. */
|
||||
r = strlen(fmt);
|
||||
} else {
|
||||
va_start(args, fmt);
|
||||
r = vsprintf(buf, fmt, args);
|
||||
va_end(args);
|
||||
fmt = buf;
|
||||
}
|
||||
|
||||
if (r)
|
||||
r = os_write_file(1, fmt, r);
|
||||
return r;
|
||||
|
||||
}
|
||||
|
||||
#ifdef CONFIG_NOCONFIG_CHAN
|
||||
/* Despite its name, there's no added trailing newline. */
|
||||
static int my_puts(const char * buf)
|
||||
{
|
||||
return os_write_file(1, buf, strlen(buf));
|
||||
}
|
||||
|
||||
static void *not_configged_init(char *str, int device, struct chan_opts *opts)
|
||||
{
|
||||
printk(KERN_ERR "Using a channel type which is configured out of "
|
||||
my_puts("Using a channel type which is configured out of "
|
||||
"UML\n");
|
||||
return(NULL);
|
||||
}
|
||||
|
@ -38,27 +64,27 @@ static void *not_configged_init(char *str, int device, struct chan_opts *opts)
|
|||
static int not_configged_open(int input, int output, int primary, void *data,
|
||||
char **dev_out)
|
||||
{
|
||||
printk(KERN_ERR "Using a channel type which is configured out of "
|
||||
my_puts("Using a channel type which is configured out of "
|
||||
"UML\n");
|
||||
return(-ENODEV);
|
||||
}
|
||||
|
||||
static void not_configged_close(int fd, void *data)
|
||||
{
|
||||
printk(KERN_ERR "Using a channel type which is configured out of "
|
||||
my_puts("Using a channel type which is configured out of "
|
||||
"UML\n");
|
||||
}
|
||||
|
||||
static int not_configged_read(int fd, char *c_out, void *data)
|
||||
{
|
||||
printk(KERN_ERR "Using a channel type which is configured out of "
|
||||
my_puts("Using a channel type which is configured out of "
|
||||
"UML\n");
|
||||
return(-EIO);
|
||||
}
|
||||
|
||||
static int not_configged_write(int fd, const char *buf, int len, void *data)
|
||||
{
|
||||
printk(KERN_ERR "Using a channel type which is configured out of "
|
||||
my_puts("Using a channel type which is configured out of "
|
||||
"UML\n");
|
||||
return(-EIO);
|
||||
}
|
||||
|
@ -66,7 +92,7 @@ static int not_configged_write(int fd, const char *buf, int len, void *data)
|
|||
static int not_configged_console_write(int fd, const char *buf, int len,
|
||||
void *data)
|
||||
{
|
||||
printk(KERN_ERR "Using a channel type which is configured out of "
|
||||
my_puts("Using a channel type which is configured out of "
|
||||
"UML\n");
|
||||
return(-EIO);
|
||||
}
|
||||
|
@ -74,14 +100,14 @@ static int not_configged_console_write(int fd, const char *buf, int len,
|
|||
static int not_configged_window_size(int fd, void *data, unsigned short *rows,
|
||||
unsigned short *cols)
|
||||
{
|
||||
printk(KERN_ERR "Using a channel type which is configured out of "
|
||||
my_puts("Using a channel type which is configured out of "
|
||||
"UML\n");
|
||||
return(-ENODEV);
|
||||
}
|
||||
|
||||
static void not_configged_free(void *data)
|
||||
{
|
||||
printf(KERN_ERR "Using a channel type which is configured out of "
|
||||
my_puts("Using a channel type which is configured out of "
|
||||
"UML\n");
|
||||
}
|
||||
|
||||
|
@ -457,7 +483,7 @@ static struct chan *parse_chan(char *str, int pri, int device,
|
|||
}
|
||||
}
|
||||
if(ops == NULL){
|
||||
printk(KERN_ERR "parse_chan couldn't parse \"%s\"\n",
|
||||
my_printf("parse_chan couldn't parse \"%s\"\n",
|
||||
str);
|
||||
return(NULL);
|
||||
}
|
||||
|
@ -465,7 +491,7 @@ static struct chan *parse_chan(char *str, int pri, int device,
|
|||
data = (*ops->init)(str, device, opts);
|
||||
if(data == NULL) return(NULL);
|
||||
|
||||
chan = kmalloc(sizeof(*chan), GFP_KERNEL);
|
||||
chan = kmalloc(sizeof(*chan), GFP_ATOMIC);
|
||||
if(chan == NULL) return(NULL);
|
||||
*chan = ((struct chan) { .list = LIST_HEAD_INIT(chan->list),
|
||||
.primary = 1,
|
||||
|
|
|
@ -23,7 +23,7 @@ static struct mconsole_command commands[] = {
|
|||
{ "reboot", mconsole_reboot, MCONSOLE_PROC },
|
||||
{ "config", mconsole_config, MCONSOLE_PROC },
|
||||
{ "remove", mconsole_remove, MCONSOLE_PROC },
|
||||
{ "sysrq", mconsole_sysrq, MCONSOLE_INTR },
|
||||
{ "sysrq", mconsole_sysrq, MCONSOLE_PROC },
|
||||
{ "help", mconsole_help, MCONSOLE_INTR },
|
||||
{ "cad", mconsole_cad, MCONSOLE_INTR },
|
||||
{ "stop", mconsole_stop, MCONSOLE_PROC },
|
||||
|
|
|
@ -14,7 +14,9 @@ extern void *um_kmalloc_atomic(int size);
|
|||
extern void kfree(void *ptr);
|
||||
extern int in_aton(char *str);
|
||||
extern int open_gdb_chan(void);
|
||||
extern int strlcpy(char *, const char *, int);
|
||||
/* These use size_t, however unsigned long is correct on both i386 and x86_64. */
|
||||
extern unsigned long strlcpy(char *, const char *, unsigned long);
|
||||
extern unsigned long strlcat(char *, const char *, unsigned long);
|
||||
extern void *um_vmalloc(int size);
|
||||
extern void vfree(void *ptr);
|
||||
|
||||
|
|
|
@ -82,7 +82,8 @@ unsigned long alloc_stack(int order, int atomic)
|
|||
unsigned long page;
|
||||
int flags = GFP_KERNEL;
|
||||
|
||||
if(atomic) flags |= GFP_ATOMIC;
|
||||
if (atomic)
|
||||
flags = GFP_ATOMIC;
|
||||
page = __get_free_pages(flags, order);
|
||||
if(page == 0)
|
||||
return(0);
|
||||
|
|
|
@ -340,7 +340,7 @@ static int setup_initial_poll(int fd)
|
|||
{
|
||||
struct pollfd *p;
|
||||
|
||||
p = um_kmalloc(sizeof(struct pollfd));
|
||||
p = um_kmalloc_atomic(sizeof(struct pollfd));
|
||||
if(p == NULL){
|
||||
printk("setup_initial_poll : failed to allocate poll\n");
|
||||
return(-1);
|
||||
|
|
|
@ -193,12 +193,12 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
|
|||
r = pte_read(*npte);
|
||||
w = pte_write(*npte);
|
||||
x = pte_exec(*npte);
|
||||
if(!pte_dirty(*npte))
|
||||
w = 0;
|
||||
if(!pte_young(*npte)){
|
||||
r = 0;
|
||||
w = 0;
|
||||
}
|
||||
if (!pte_young(*npte)) {
|
||||
r = 0;
|
||||
w = 0;
|
||||
} else if (!pte_dirty(*npte)) {
|
||||
w = 0;
|
||||
}
|
||||
if(force || pte_newpage(*npte)){
|
||||
if(pte_present(*npte))
|
||||
ret = add_mmap(addr,
|
||||
|
|
|
@ -18,6 +18,7 @@
|
|||
#include "asm/a.out.h"
|
||||
#include "asm/current.h"
|
||||
#include "asm/irq.h"
|
||||
#include "sysdep/sigcontext.h"
|
||||
#include "user_util.h"
|
||||
#include "kern_util.h"
|
||||
#include "kern.h"
|
||||
|
@ -39,6 +40,12 @@ int handle_page_fault(unsigned long address, unsigned long ip,
|
|||
int err = -EFAULT;
|
||||
|
||||
*code_out = SEGV_MAPERR;
|
||||
|
||||
/* If the fault was during atomic operation, don't take the fault, just
|
||||
* fail. */
|
||||
if (in_atomic())
|
||||
goto out_nosemaphore;
|
||||
|
||||
down_read(&mm->mmap_sem);
|
||||
vma = find_vma(mm, address);
|
||||
if(!vma)
|
||||
|
@ -89,6 +96,7 @@ survive:
|
|||
flush_tlb_page(vma, address);
|
||||
out:
|
||||
up_read(&mm->mmap_sem);
|
||||
out_nosemaphore:
|
||||
return(err);
|
||||
|
||||
/*
|
||||
|
@ -125,7 +133,15 @@ unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user, void *sc)
|
|||
}
|
||||
else if(current->mm == NULL)
|
||||
panic("Segfault with no mm");
|
||||
err = handle_page_fault(address, ip, is_write, is_user, &si.si_code);
|
||||
|
||||
if (SEGV_IS_FIXABLE(&fi))
|
||||
err = handle_page_fault(address, ip, is_write, is_user, &si.si_code);
|
||||
else {
|
||||
err = -EFAULT;
|
||||
/* A thread accessed NULL, we get a fault, but CR2 is invalid.
|
||||
* This code is used in __do_copy_from_user() of TT mode. */
|
||||
address = 0;
|
||||
}
|
||||
|
||||
catcher = current->thread.fault_catcher;
|
||||
if(!err)
|
||||
|
|
|
@ -22,8 +22,15 @@ int __do_copy_from_user(void *to, const void *from, int n,
|
|||
__do_copy, &faulted);
|
||||
TASK_REGS(get_current())->tt = save;
|
||||
|
||||
if(!faulted) return(0);
|
||||
else return(n - (fault - (unsigned long) from));
|
||||
if(!faulted)
|
||||
return 0;
|
||||
else if (fault)
|
||||
return n - (fault - (unsigned long) from);
|
||||
else
|
||||
/* In case of a general protection fault, we don't have the
|
||||
* fault address, so NULL is used instead. Pretend we didn't
|
||||
* copy anything. */
|
||||
return n;
|
||||
}
|
||||
|
||||
static void __do_strncpy(void *dst, const void *src, int count)
|
||||
|
|
|
@ -31,6 +31,8 @@ static char *uml_dir = UML_DIR;
|
|||
/* Changed by set_umid */
|
||||
static int umid_is_random = 1;
|
||||
static int umid_inited = 0;
|
||||
/* Have we created the files? Should we remove them? */
|
||||
static int umid_owned = 0;
|
||||
|
||||
static int make_umid(int (*printer)(const char *fmt, ...));
|
||||
|
||||
|
@ -82,20 +84,21 @@ int __init umid_file_name(char *name, char *buf, int len)
|
|||
|
||||
extern int tracing_pid;
|
||||
|
||||
static int __init create_pid_file(void)
|
||||
static void __init create_pid_file(void)
|
||||
{
|
||||
char file[strlen(uml_dir) + UMID_LEN + sizeof("/pid\0")];
|
||||
char pid[sizeof("nnnnn\0")];
|
||||
int fd, n;
|
||||
|
||||
if(umid_file_name("pid", file, sizeof(file))) return 0;
|
||||
if(umid_file_name("pid", file, sizeof(file)))
|
||||
return;
|
||||
|
||||
fd = os_open_file(file, of_create(of_excl(of_rdwr(OPENFLAGS()))),
|
||||
0644);
|
||||
if(fd < 0){
|
||||
printf("Open of machine pid file \"%s\" failed: %s\n",
|
||||
file, strerror(-fd));
|
||||
return 0;
|
||||
return;
|
||||
}
|
||||
|
||||
sprintf(pid, "%d\n", os_getpid());
|
||||
|
@ -103,7 +106,6 @@ static int __init create_pid_file(void)
|
|||
if(n != strlen(pid))
|
||||
printf("Write of pid file failed - err = %d\n", -n);
|
||||
os_close_file(fd);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int actually_do_remove(char *dir)
|
||||
|
@ -147,7 +149,8 @@ static int actually_do_remove(char *dir)
|
|||
void remove_umid_dir(void)
|
||||
{
|
||||
char dir[strlen(uml_dir) + UMID_LEN + 1];
|
||||
if(!umid_inited) return;
|
||||
if (!umid_owned)
|
||||
return;
|
||||
|
||||
sprintf(dir, "%s%s", uml_dir, umid);
|
||||
actually_do_remove(dir);
|
||||
|
@ -155,11 +158,12 @@ void remove_umid_dir(void)
|
|||
|
||||
char *get_umid(int only_if_set)
|
||||
{
|
||||
if(only_if_set && umid_is_random) return(NULL);
|
||||
return(umid);
|
||||
if(only_if_set && umid_is_random)
|
||||
return NULL;
|
||||
return umid;
|
||||
}
|
||||
|
||||
int not_dead_yet(char *dir)
|
||||
static int not_dead_yet(char *dir)
|
||||
{
|
||||
char file[strlen(uml_dir) + UMID_LEN + sizeof("/pid\0")];
|
||||
char pid[sizeof("nnnnn\0")], *end;
|
||||
|
@ -193,7 +197,8 @@ int not_dead_yet(char *dir)
|
|||
(p == CHOOSE_MODE(tracing_pid, os_getpid())))
|
||||
dead = 1;
|
||||
}
|
||||
if(!dead) return(1);
|
||||
if(!dead)
|
||||
return(1);
|
||||
return(actually_do_remove(dir));
|
||||
}
|
||||
|
||||
|
@ -232,16 +237,13 @@ static int __init make_uml_dir(void)
|
|||
strlcpy(dir, home, sizeof(dir));
|
||||
uml_dir++;
|
||||
}
|
||||
strlcat(dir, uml_dir, sizeof(dir));
|
||||
len = strlen(dir);
|
||||
strncat(dir, uml_dir, sizeof(dir) - len);
|
||||
len = strlen(dir);
|
||||
if((len > 0) && (len < sizeof(dir) - 1) && (dir[len - 1] != '/')){
|
||||
dir[len] = '/';
|
||||
dir[len + 1] = '\0';
|
||||
}
|
||||
if (len > 0 && dir[len - 1] != '/')
|
||||
strlcat(dir, "/", sizeof(dir));
|
||||
|
||||
uml_dir = malloc(strlen(dir) + 1);
|
||||
if(uml_dir == NULL){
|
||||
if (uml_dir == NULL) {
|
||||
printf("make_uml_dir : malloc failed, errno = %d\n", errno);
|
||||
exit(1);
|
||||
}
|
||||
|
@ -286,6 +288,7 @@ static int __init make_umid(int (*printer)(const char *fmt, ...))
|
|||
if(errno == EEXIST){
|
||||
if(not_dead_yet(tmp)){
|
||||
(*printer)("umid '%s' is in use\n", umid);
|
||||
umid_owned = 0;
|
||||
return(-1);
|
||||
}
|
||||
err = mkdir(tmp, 0777);
|
||||
|
@ -296,7 +299,8 @@ static int __init make_umid(int (*printer)(const char *fmt, ...))
|
|||
return(-1);
|
||||
}
|
||||
|
||||
return(0);
|
||||
umid_owned = 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
__uml_setup("uml_dir=", set_uml_dir,
|
||||
|
@ -309,7 +313,8 @@ static int __init make_umid_setup(void)
|
|||
/* one function with the ordering we need ... */
|
||||
make_uml_dir();
|
||||
make_umid(printf);
|
||||
return create_pid_file();
|
||||
create_pid_file();
|
||||
return 0;
|
||||
}
|
||||
__uml_postsetup(make_umid_setup);
|
||||
|
||||
|
|
|
@ -402,8 +402,8 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
|
|||
__pci_mmap_set_flags(dev, vma, mmap_state);
|
||||
__pci_mmap_set_pgprot(dev, vma, mmap_state, write_combine);
|
||||
|
||||
ret = io_remap_page_range(vma, vma->vm_start, vma->vm_pgoff<<PAGE_SHIFT,
|
||||
vma->vm_end - vma->vm_start, vma->vm_page_prot);
|
||||
ret = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
|
||||
vma->vm_end - vma->vm_start,vma->vm_page_prot);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
|
|
@ -39,7 +39,7 @@ _F(int, pcibios_fixup, (void), { return 0; });
|
|||
_F(int, get_rtc_time, (time_t* t), { return 0; });
|
||||
_F(int, set_rtc_time, (time_t t), { return 0; });
|
||||
|
||||
#if CONFIG_XTENSA_CALIBRATE_CCOUNT
|
||||
#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
|
||||
_F(void, calibrate_ccount, (void),
|
||||
{
|
||||
printk ("ERROR: Cannot calibrate cpu frequency! Assuming 100MHz.\n");
|
||||
|
|
|
@ -457,7 +457,7 @@ int
|
|||
dump_task_fpu(struct pt_regs *regs, struct task_struct *task, elf_fpregset_t *r)
|
||||
{
|
||||
/* see asm/coprocessor.h for this magic number 16 */
|
||||
#if TOTAL_CPEXTRA_SIZE > 16
|
||||
#if XTENSA_CP_EXTRA_SIZE > 16
|
||||
do_save_fpregs (r, regs, task);
|
||||
|
||||
/* For now, bit 16 means some extra state may be present: */
|
||||
|
|
|
@ -304,7 +304,7 @@ void __init setup_arch(char **cmdline_p)
|
|||
# endif
|
||||
#endif
|
||||
|
||||
#if CONFIG_PCI
|
||||
#ifdef CONFIG_PCI
|
||||
platform_pcibios_init();
|
||||
#endif
|
||||
}
|
||||
|
|
|
@ -182,7 +182,7 @@ restore_cpextra (struct _cpstate *buf)
|
|||
|
||||
struct task_struct *tsk = current;
|
||||
release_all_cp(tsk);
|
||||
return __copy_from_user(tsk->thread.cpextra, buf, TOTAL_CPEXTRA_SIZE);
|
||||
return __copy_from_user(tsk->thread.cpextra, buf, XTENSA_CP_EXTRA_SIZE);
|
||||
#endif
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -68,7 +68,7 @@ void __init time_init(void)
|
|||
* speed for the CALIBRATE.
|
||||
*/
|
||||
|
||||
#if CONFIG_XTENSA_CALIBRATE_CCOUNT
|
||||
#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
|
||||
printk("Calibrating CPU frequency ");
|
||||
platform_calibrate_ccount();
|
||||
printk("%d.%02d MHz\n", (int)ccount_per_jiffy/(1000000/HZ),
|
||||
|
|
|
@ -239,7 +239,7 @@ void __init mem_init(void)
|
|||
high_memory = (void *) __va(max_mapnr << PAGE_SHIFT);
|
||||
highmemsize = 0;
|
||||
|
||||
#if CONFIG_HIGHMEM
|
||||
#ifdef CONFIG_HIGHMEM
|
||||
#error HIGHGMEM not implemented in init.c
|
||||
#endif
|
||||
|
||||
|
|
|
@ -669,6 +669,7 @@ void class_device_destroy(struct class *cls, dev_t devt)
|
|||
int class_device_rename(struct class_device *class_dev, char *new_name)
|
||||
{
|
||||
int error = 0;
|
||||
char *old_class_name = NULL, *new_class_name = NULL;
|
||||
|
||||
class_dev = class_device_get(class_dev);
|
||||
if (!class_dev)
|
||||
|
@ -677,12 +678,24 @@ int class_device_rename(struct class_device *class_dev, char *new_name)
|
|||
pr_debug("CLASS: renaming '%s' to '%s'\n", class_dev->class_id,
|
||||
new_name);
|
||||
|
||||
if (class_dev->dev)
|
||||
old_class_name = make_class_name(class_dev);
|
||||
|
||||
strlcpy(class_dev->class_id, new_name, KOBJ_NAME_LEN);
|
||||
|
||||
error = kobject_rename(&class_dev->kobj, new_name);
|
||||
|
||||
if (class_dev->dev) {
|
||||
new_class_name = make_class_name(class_dev);
|
||||
sysfs_create_link(&class_dev->dev->kobj, &class_dev->kobj,
|
||||
new_class_name);
|
||||
sysfs_remove_link(&class_dev->dev->kobj, old_class_name);
|
||||
}
|
||||
class_device_put(class_dev);
|
||||
|
||||
kfree(old_class_name);
|
||||
kfree(new_class_name);
|
||||
|
||||
return error;
|
||||
}
|
||||
|
||||
|
|
|
@ -40,6 +40,9 @@
|
|||
*/
|
||||
void device_bind_driver(struct device * dev)
|
||||
{
|
||||
if (klist_node_attached(&dev->knode_driver))
|
||||
return;
|
||||
|
||||
pr_debug("bound device '%s' to driver '%s'\n",
|
||||
dev->bus_id, dev->driver->name);
|
||||
klist_add_tail(&dev->knode_driver, &dev->driver->klist_devices);
|
||||
|
|
|
@ -172,7 +172,7 @@ struct bulk_cs_wrap {
|
|||
*/
|
||||
struct ub_dev;
|
||||
|
||||
#define UB_MAX_REQ_SG 4
|
||||
#define UB_MAX_REQ_SG 9 /* cdrecord requires 32KB and maybe a header */
|
||||
#define UB_MAX_SECTORS 64
|
||||
|
||||
/*
|
||||
|
@ -387,7 +387,7 @@ struct ub_dev {
|
|||
struct bulk_cs_wrap work_bcs;
|
||||
struct usb_ctrlrequest work_cr;
|
||||
|
||||
int sg_stat[UB_MAX_REQ_SG+1];
|
||||
int sg_stat[6];
|
||||
struct ub_scsi_trace tr;
|
||||
};
|
||||
|
||||
|
@ -525,12 +525,13 @@ static ssize_t ub_diag_show(struct device *dev, struct device_attribute *attr,
|
|||
"qlen %d qmax %d\n",
|
||||
sc->cmd_queue.qlen, sc->cmd_queue.qmax);
|
||||
cnt += sprintf(page + cnt,
|
||||
"sg %d %d %d %d %d\n",
|
||||
"sg %d %d %d %d %d .. %d\n",
|
||||
sc->sg_stat[0],
|
||||
sc->sg_stat[1],
|
||||
sc->sg_stat[2],
|
||||
sc->sg_stat[3],
|
||||
sc->sg_stat[4]);
|
||||
sc->sg_stat[4],
|
||||
sc->sg_stat[5]);
|
||||
|
||||
list_for_each (p, &sc->luns) {
|
||||
lun = list_entry(p, struct ub_lun, link);
|
||||
|
@ -835,7 +836,7 @@ static int ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
|
|||
return -1;
|
||||
}
|
||||
cmd->nsg = n_elem;
|
||||
sc->sg_stat[n_elem]++;
|
||||
sc->sg_stat[n_elem < 5 ? n_elem : 5]++;
|
||||
|
||||
/*
|
||||
* build the command
|
||||
|
@ -891,7 +892,7 @@ static int ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
|
|||
return -1;
|
||||
}
|
||||
cmd->nsg = n_elem;
|
||||
sc->sg_stat[n_elem]++;
|
||||
sc->sg_stat[n_elem < 5 ? n_elem : 5]++;
|
||||
|
||||
memcpy(&cmd->cdb, rq->cmd, rq->cmd_len);
|
||||
cmd->cdb_len = rq->cmd_len;
|
||||
|
@ -1010,7 +1011,6 @@ static int ub_scsi_cmd_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
|
|||
sc->last_pipe = sc->send_bulk_pipe;
|
||||
usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->send_bulk_pipe,
|
||||
bcb, US_BULK_CB_WRAP_LEN, ub_urb_complete, sc);
|
||||
sc->work_urb.transfer_flags = 0;
|
||||
|
||||
/* Fill what we shouldn't be filling, because usb-storage did so. */
|
||||
sc->work_urb.actual_length = 0;
|
||||
|
@ -1019,7 +1019,6 @@ static int ub_scsi_cmd_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
|
|||
|
||||
if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
|
||||
/* XXX Clear stalls */
|
||||
printk("ub: cmd #%d start failed (%d)\n", cmd->tag, rc); /* P3 */
|
||||
ub_complete(&sc->work_done);
|
||||
return rc;
|
||||
}
|
||||
|
@ -1190,11 +1189,9 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
|
|||
return;
|
||||
}
|
||||
if (urb->status != 0) {
|
||||
printk("ub: cmd #%d cmd status (%d)\n", cmd->tag, urb->status); /* P3 */
|
||||
goto Bad_End;
|
||||
}
|
||||
if (urb->actual_length != US_BULK_CB_WRAP_LEN) {
|
||||
printk("ub: cmd #%d xferred %d\n", cmd->tag, urb->actual_length); /* P3 */
|
||||
/* XXX Must do reset here to unconfuse the device */
|
||||
goto Bad_End;
|
||||
}
|
||||
|
@ -1395,14 +1392,12 @@ static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
|
|||
usb_fill_bulk_urb(&sc->work_urb, sc->dev, pipe,
|
||||
page_address(sg->page) + sg->offset, sg->length,
|
||||
ub_urb_complete, sc);
|
||||
sc->work_urb.transfer_flags = 0;
|
||||
sc->work_urb.actual_length = 0;
|
||||
sc->work_urb.error_count = 0;
|
||||
sc->work_urb.status = 0;
|
||||
|
||||
if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
|
||||
/* XXX Clear stalls */
|
||||
printk("ub: data #%d submit failed (%d)\n", cmd->tag, rc); /* P3 */
|
||||
ub_complete(&sc->work_done);
|
||||
ub_state_done(sc, cmd, rc);
|
||||
return;
|
||||
|
@ -1442,7 +1437,6 @@ static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
|
|||
sc->last_pipe = sc->recv_bulk_pipe;
|
||||
usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->recv_bulk_pipe,
|
||||
&sc->work_bcs, US_BULK_CS_WRAP_LEN, ub_urb_complete, sc);
|
||||
sc->work_urb.transfer_flags = 0;
|
||||
sc->work_urb.actual_length = 0;
|
||||
sc->work_urb.error_count = 0;
|
||||
sc->work_urb.status = 0;
|
||||
|
@ -1563,7 +1557,6 @@ static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
|
|||
|
||||
usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
|
||||
(unsigned char*) cr, NULL, 0, ub_urb_complete, sc);
|
||||
sc->work_urb.transfer_flags = 0;
|
||||
sc->work_urb.actual_length = 0;
|
||||
sc->work_urb.error_count = 0;
|
||||
sc->work_urb.status = 0;
|
||||
|
@ -2000,17 +1993,16 @@ static int ub_sync_getmaxlun(struct ub_dev *sc)
|
|||
|
||||
usb_fill_control_urb(&sc->work_urb, sc->dev, sc->recv_ctrl_pipe,
|
||||
(unsigned char*) cr, p, 1, ub_probe_urb_complete, &compl);
|
||||
sc->work_urb.transfer_flags = 0;
|
||||
sc->work_urb.actual_length = 0;
|
||||
sc->work_urb.error_count = 0;
|
||||
sc->work_urb.status = 0;
|
||||
|
||||
if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) {
|
||||
if (rc == -EPIPE) {
|
||||
printk("%s: Stall at GetMaxLUN, using 1 LUN\n",
|
||||
printk("%s: Stall submitting GetMaxLUN, using 1 LUN\n",
|
||||
sc->name); /* P3 */
|
||||
} else {
|
||||
printk(KERN_WARNING
|
||||
printk(KERN_NOTICE
|
||||
"%s: Unable to submit GetMaxLUN (%d)\n",
|
||||
sc->name, rc);
|
||||
}
|
||||
|
@ -2028,6 +2020,18 @@ static int ub_sync_getmaxlun(struct ub_dev *sc)
|
|||
del_timer_sync(&timer);
|
||||
usb_kill_urb(&sc->work_urb);
|
||||
|
||||
if ((rc = sc->work_urb.status) < 0) {
|
||||
if (rc == -EPIPE) {
|
||||
printk("%s: Stall at GetMaxLUN, using 1 LUN\n",
|
||||
sc->name); /* P3 */
|
||||
} else {
|
||||
printk(KERN_NOTICE
|
||||
"%s: Error at GetMaxLUN (%d)\n",
|
||||
sc->name, rc);
|
||||
}
|
||||
goto err_io;
|
||||
}
|
||||
|
||||
if (sc->work_urb.actual_length != 1) {
|
||||
printk("%s: GetMaxLUN returned %d bytes\n", sc->name,
|
||||
sc->work_urb.actual_length); /* P3 */
|
||||
|
@ -2048,6 +2052,7 @@ static int ub_sync_getmaxlun(struct ub_dev *sc)
|
|||
kfree(p);
|
||||
return nluns;
|
||||
|
||||
err_io:
|
||||
err_submit:
|
||||
kfree(p);
|
||||
err_alloc:
|
||||
|
@ -2080,7 +2085,6 @@ static int ub_probe_clear_stall(struct ub_dev *sc, int stalled_pipe)
|
|||
|
||||
usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
|
||||
(unsigned char*) cr, NULL, 0, ub_probe_urb_complete, &compl);
|
||||
sc->work_urb.transfer_flags = 0;
|
||||
sc->work_urb.actual_length = 0;
|
||||
sc->work_urb.error_count = 0;
|
||||
sc->work_urb.status = 0;
|
||||
|
@ -2213,8 +2217,10 @@ static int ub_probe(struct usb_interface *intf,
|
|||
* This is needed to clear toggles. It is a problem only if we do
|
||||
* `rmmod ub && modprobe ub` without disconnects, but we like that.
|
||||
*/
|
||||
#if 0 /* iPod Mini fails if we do this (big white iPod works) */
|
||||
ub_probe_clear_stall(sc, sc->recv_bulk_pipe);
|
||||
ub_probe_clear_stall(sc, sc->send_bulk_pipe);
|
||||
#endif
|
||||
|
||||
/*
|
||||
* The way this is used by the startup code is a little specific.
|
||||
|
@ -2241,10 +2247,10 @@ static int ub_probe(struct usb_interface *intf,
|
|||
for (i = 0; i < 3; i++) {
|
||||
if ((rc = ub_sync_getmaxlun(sc)) < 0) {
|
||||
/*
|
||||
* Some devices (i.e. Iomega Zip100) need this --
|
||||
* apparently the bulk pipes get STALLed when the
|
||||
* GetMaxLUN request is processed.
|
||||
* XXX I have a ZIP-100, verify it does this.
|
||||
* This segment is taken from usb-storage. They say
|
||||
* that ZIP-100 needs this, but my own ZIP-100 works
|
||||
* fine without this.
|
||||
* Still, it does not seem to hurt anything.
|
||||
*/
|
||||
if (rc == -EPIPE) {
|
||||
ub_probe_clear_stall(sc, sc->recv_bulk_pipe);
|
||||
|
@ -2313,7 +2319,7 @@ static int ub_probe_lun(struct ub_dev *sc, int lnum)
|
|||
disk->first_minor = lun->id * UB_MINORS_PER_MAJOR;
|
||||
disk->fops = &ub_bd_fops;
|
||||
disk->private_data = lun;
|
||||
disk->driverfs_dev = &sc->intf->dev; /* XXX Many to one ok? */
|
||||
disk->driverfs_dev = &sc->intf->dev;
|
||||
|
||||
rc = -ENOMEM;
|
||||
if ((q = blk_init_queue(ub_request_fn, &sc->lock)) == NULL)
|
||||
|
@ -2466,9 +2472,6 @@ static int __init ub_init(void)
|
|||
{
|
||||
int rc;
|
||||
|
||||
/* P3 */ printk("ub: sizeof ub_scsi_cmd %zu ub_dev %zu ub_lun %zu\n",
|
||||
sizeof(struct ub_scsi_cmd), sizeof(struct ub_dev), sizeof(struct ub_lun));
|
||||
|
||||
if ((rc = register_blkdev(UB_MAJOR, DRV_NAME)) != 0)
|
||||
goto err_regblkdev;
|
||||
devfs_mk_dir(DEVFS_NAME);
|
||||
|
|
|
@@ -273,7 +273,6 @@ static int hpet_mmap(struct file *file, struct vm_area_struct *vma)
 
 	vma->vm_flags |= VM_IO;
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-	addr = __pa(addr);
 
 	if (io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT,
 				PAGE_SIZE, vma->vm_page_prot)) {
@@ -2620,7 +2620,7 @@ void ipmi_smi_msg_received(ipmi_smi_t intf,
 	spin_lock_irqsave(&(intf->waiting_msgs_lock), flags);
 	if (!list_empty(&(intf->waiting_msgs))) {
 		list_add_tail(&(msg->link), &(intf->waiting_msgs));
-		spin_unlock(&(intf->waiting_msgs_lock));
+		spin_unlock_irqrestore(&(intf->waiting_msgs_lock), flags);
 		goto out_unlock;
 	}
 	spin_unlock_irqrestore(&(intf->waiting_msgs_lock), flags);
@@ -2629,9 +2629,9 @@ void ipmi_smi_msg_received(ipmi_smi_t intf,
 	if (rv > 0) {
 		/* Could not handle the message now, just add it to a
 		   list to handle later. */
-		spin_lock(&(intf->waiting_msgs_lock));
+		spin_lock_irqsave(&(intf->waiting_msgs_lock), flags);
 		list_add_tail(&(msg->link), &(intf->waiting_msgs));
-		spin_unlock(&(intf->waiting_msgs_lock));
+		spin_unlock_irqrestore(&(intf->waiting_msgs_lock), flags);
 	} else if (rv == 0) {
 		ipmi_free_smi_msg(msg);
 	}
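The two ipmi_msghandler hunks above make the use of waiting_msgs_lock symmetric: a lock taken with spin_lock_irqsave() must be released with spin_unlock_irqrestore(), and every acquisition of a lock that can also be taken with interrupts disabled needs the irqsave variant. A minimal sketch of that pattern, using illustrative names rather than the driver's own:

    #include <linux/spinlock.h>
    #include <linux/list.h>

    static DEFINE_SPINLOCK(pending_lock);   /* hypothetical lock */
    static LIST_HEAD(pending);              /* hypothetical list */

    static void queue_pending(struct list_head *item)
    {
            unsigned long flags;

            /* irqsave/irqrestore pair: safe whether the caller runs in
             * process context or with interrupts already disabled. */
            spin_lock_irqsave(&pending_lock, flags);
            list_add_tail(item, &pending);
            spin_unlock_irqrestore(&pending_lock, flags);
    }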
@@ -418,12 +418,11 @@ config SENSORS_HDAPS
 	help
 	  This driver provides support for the IBM Hard Drive Active Protection
 	  System (hdaps), which provides an accelerometer and other misc. data.
-	  Supported laptops include the IBM ThinkPad T41, T42, T43, and R51.
-	  The accelerometer data is readable via sysfs.
+	  ThinkPads starting with the R50, T41, and X40 are supported. The
+	  accelerometer data is readable via sysfs.
 
-	  This driver also provides an input class device, allowing the
-	  laptop to act as a pinball machine-esque mouse. This is off by
-	  default but enabled via sysfs or the module parameter "mousedev".
+	  This driver also provides an absolute input class device, allowing
+	  the laptop to act as a pinball machine-esque joystick.
 
 	  Say Y here if you have an applicable laptop and want to experience
 	  the awesome power of hdaps.
@ -4,9 +4,9 @@
|
|||
* Copyright (C) 2005 Robert Love <rml@novell.com>
|
||||
* Copyright (C) 2005 Jesper Juhl <jesper.juhl@gmail.com>
|
||||
*
|
||||
* The HardDisk Active Protection System (hdaps) is present in the IBM ThinkPad
|
||||
* T41, T42, T43, R50, R50p, R51, and X40, at least. It provides a basic
|
||||
* two-axis accelerometer and other data, such as the device's temperature.
|
||||
* The HardDisk Active Protection System (hdaps) is present in IBM ThinkPads
|
||||
* starting with the R40, T41, and X40. It provides a basic two-axis
|
||||
* accelerometer and other data, such as the device's temperature.
|
||||
*
|
||||
* This driver is based on the document by Mark A. Smith available at
|
||||
* http://www.almaden.ibm.com/cs/people/marksmith/tpaps.html and a lot of trial
|
||||
|
@ -487,24 +487,19 @@ static struct attribute_group hdaps_attribute_group = {
|
|||
|
||||
/* Module stuff */
|
||||
|
||||
/*
|
||||
* XXX: We should be able to return nonzero and halt the detection process.
|
||||
* But there is a bug in dmi_check_system() where a nonzero return from the
|
||||
* first match will result in a return of failure from dmi_check_system().
|
||||
* I fixed this; the patch is 2.6-git. Once in a released tree, we can make
|
||||
* hdaps_dmi_match_invert() return hdaps_dmi_match(), which in turn returns 1.
|
||||
*/
|
||||
/* hdaps_dmi_match - found a match. return one, short-circuiting the hunt. */
|
||||
static int hdaps_dmi_match(struct dmi_system_id *id)
|
||||
{
|
||||
printk(KERN_INFO "hdaps: %s detected.\n", id->ident);
|
||||
return 0;
|
||||
return 1;
|
||||
}
|
||||
|
||||
/* hdaps_dmi_match_invert - found an inverted match. */
|
||||
static int hdaps_dmi_match_invert(struct dmi_system_id *id)
|
||||
{
|
||||
hdaps_invert = 1;
|
||||
printk(KERN_INFO "hdaps: inverting axis readings.\n");
|
||||
return 0;
|
||||
return hdaps_dmi_match(id);
|
||||
}
|
||||
|
||||
#define HDAPS_DMI_MATCH_NORMAL(model) { \
|
||||
|
@ -534,6 +529,7 @@ static int __init hdaps_init(void)
|
|||
HDAPS_DMI_MATCH_INVERT("ThinkPad R50p"),
|
||||
HDAPS_DMI_MATCH_NORMAL("ThinkPad R50"),
|
||||
HDAPS_DMI_MATCH_NORMAL("ThinkPad R51"),
|
||||
HDAPS_DMI_MATCH_NORMAL("ThinkPad R52"),
|
||||
HDAPS_DMI_MATCH_INVERT("ThinkPad T41p"),
|
||||
HDAPS_DMI_MATCH_NORMAL("ThinkPad T41"),
|
||||
HDAPS_DMI_MATCH_INVERT("ThinkPad T42p"),
|
||||
|
@ -541,6 +537,7 @@ static int __init hdaps_init(void)
|
|||
HDAPS_DMI_MATCH_NORMAL("ThinkPad T43"),
|
||||
HDAPS_DMI_MATCH_NORMAL("ThinkPad X40"),
|
||||
HDAPS_DMI_MATCH_NORMAL("ThinkPad X41 Tablet"),
|
||||
HDAPS_DMI_MATCH_NORMAL("ThinkPad X41"),
|
||||
{ .ident = NULL }
|
||||
};
|
||||
|
||||
|
|
|
@ -245,6 +245,18 @@ config I2C_KEYWEST
|
|||
This support is also available as a module. If so, the module
|
||||
will be called i2c-keywest.
|
||||
|
||||
config I2C_PMAC_SMU
|
||||
tristate "Powermac SMU I2C interface"
|
||||
depends on I2C && PMAC_SMU
|
||||
help
|
||||
This supports the use of the I2C interface in the SMU
|
||||
chip on recent Apple machines like the iMac G5. It is used
|
||||
among others by the thermal control driver for those machines.
|
||||
Say Y if you have such a machine.
|
||||
|
||||
This support is also available as a module. If so, the module
|
||||
will be called i2c-pmac-smu.
|
||||
|
||||
config I2C_MPC
|
||||
tristate "MPC107/824x/85xx/52xx"
|
||||
depends on I2C && PPC32
|
||||
|
|
|
@ -20,6 +20,7 @@ obj-$(CONFIG_I2C_ITE) += i2c-ite.o
|
|||
obj-$(CONFIG_I2C_IXP2000) += i2c-ixp2000.o
|
||||
obj-$(CONFIG_I2C_IXP4XX) += i2c-ixp4xx.o
|
||||
obj-$(CONFIG_I2C_KEYWEST) += i2c-keywest.o
|
||||
obj-$(CONFIG_I2C_PMAC_SMU) += i2c-pmac-smu.o
|
||||
obj-$(CONFIG_I2C_MPC) += i2c-mpc.o
|
||||
obj-$(CONFIG_I2C_MV64XXX) += i2c-mv64xxx.o
|
||||
obj-$(CONFIG_I2C_NFORCE2) += i2c-nforce2.o
|
||||
|
|
|
@ -0,0 +1,316 @@
|
|||
/*
|
||||
i2c Support for Apple SMU Controller
|
||||
|
||||
Copyright (c) 2005 Benjamin Herrenschmidt, IBM Corp.
|
||||
<benh@kernel.crashing.org>
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation; either version 2 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program; if not, write to the Free Software
|
||||
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
|
||||
|
||||
*/
|
||||
|
||||
#include <linux/config.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/i2c.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/completion.h>
|
||||
#include <linux/device.h>
|
||||
#include <asm/prom.h>
|
||||
#include <asm/of_device.h>
|
||||
#include <asm/smu.h>
|
||||
|
||||
static int probe;
|
||||
|
||||
MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
|
||||
MODULE_DESCRIPTION("I2C driver for Apple's SMU");
|
||||
MODULE_LICENSE("GPL");
|
||||
module_param(probe, bool, 0);
|
||||
|
||||
|
||||
/* Physical interface */
|
||||
struct smu_iface
|
||||
{
|
||||
struct i2c_adapter adapter;
|
||||
struct completion complete;
|
||||
u32 busid;
|
||||
};
|
||||
|
||||
static void smu_i2c_done(struct smu_i2c_cmd *cmd, void *misc)
|
||||
{
|
||||
struct smu_iface *iface = misc;
|
||||
complete(&iface->complete);
|
||||
}
|
||||
|
||||
/*
|
||||
* SMBUS-type transfer entrypoint
|
||||
*/
|
||||
static s32 smu_smbus_xfer( struct i2c_adapter* adap,
|
||||
u16 addr,
|
||||
unsigned short flags,
|
||||
char read_write,
|
||||
u8 command,
|
||||
int size,
|
||||
union i2c_smbus_data* data)
|
||||
{
|
||||
struct smu_iface *iface = i2c_get_adapdata(adap);
|
||||
struct smu_i2c_cmd cmd;
|
||||
int rc = 0;
|
||||
int read = (read_write == I2C_SMBUS_READ);
|
||||
|
||||
cmd.info.bus = iface->busid;
|
||||
cmd.info.devaddr = (addr << 1) | (read ? 0x01 : 0x00);
|
||||
|
||||
/* Prepare data & select mode */
|
||||
switch (size) {
|
||||
case I2C_SMBUS_QUICK:
|
||||
cmd.info.type = SMU_I2C_TRANSFER_SIMPLE;
|
||||
cmd.info.datalen = 0;
|
||||
break;
|
||||
case I2C_SMBUS_BYTE:
|
||||
cmd.info.type = SMU_I2C_TRANSFER_SIMPLE;
|
||||
cmd.info.datalen = 1;
|
||||
if (!read)
|
||||
cmd.info.data[0] = data->byte;
|
||||
break;
|
||||
case I2C_SMBUS_BYTE_DATA:
|
||||
cmd.info.type = SMU_I2C_TRANSFER_STDSUB;
|
||||
cmd.info.datalen = 1;
|
||||
cmd.info.sublen = 1;
|
||||
cmd.info.subaddr[0] = command;
|
||||
cmd.info.subaddr[1] = 0;
|
||||
cmd.info.subaddr[2] = 0;
|
||||
if (!read)
|
||||
cmd.info.data[0] = data->byte;
|
||||
break;
|
||||
case I2C_SMBUS_WORD_DATA:
|
||||
cmd.info.type = SMU_I2C_TRANSFER_STDSUB;
|
||||
cmd.info.datalen = 2;
|
||||
cmd.info.sublen = 1;
|
||||
cmd.info.subaddr[0] = command;
|
||||
cmd.info.subaddr[1] = 0;
|
||||
cmd.info.subaddr[2] = 0;
|
||||
if (!read) {
|
||||
cmd.info.data[0] = data->byte & 0xff;
|
||||
cmd.info.data[1] = (data->byte >> 8) & 0xff;
|
||||
}
|
||||
break;
|
||||
/* Note that these are broken vs. the expected smbus API where
|
||||
* on reads, the length is actually returned from the function,
|
||||
* but I think the current API makes no sense and I don't want
|
||||
* any driver that I haven't verified for correctness to go
|
||||
* anywhere near a pmac i2c bus anyway ...
|
||||
*/
|
||||
case I2C_SMBUS_BLOCK_DATA:
|
||||
cmd.info.type = SMU_I2C_TRANSFER_STDSUB;
|
||||
cmd.info.datalen = data->block[0] + 1;
|
||||
if (cmd.info.datalen > 6)
|
||||
return -EINVAL;
|
||||
if (!read)
|
||||
memcpy(cmd.info.data, data->block, cmd.info.datalen);
|
||||
cmd.info.sublen = 1;
|
||||
cmd.info.subaddr[0] = command;
|
||||
cmd.info.subaddr[1] = 0;
|
||||
cmd.info.subaddr[2] = 0;
|
||||
break;
|
||||
case I2C_SMBUS_I2C_BLOCK_DATA:
|
||||
cmd.info.type = SMU_I2C_TRANSFER_STDSUB;
|
||||
cmd.info.datalen = data->block[0];
|
||||
if (cmd.info.datalen > 7)
|
||||
return -EINVAL;
|
||||
if (!read)
|
||||
memcpy(cmd.info.data, &data->block[1],
|
||||
cmd.info.datalen);
|
||||
cmd.info.sublen = 1;
|
||||
cmd.info.subaddr[0] = command;
|
||||
cmd.info.subaddr[1] = 0;
|
||||
cmd.info.subaddr[2] = 0;
|
||||
break;
|
||||
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Turn a standardsub read into a combined mode access */
|
||||
if (read_write == I2C_SMBUS_READ &&
|
||||
cmd.info.type == SMU_I2C_TRANSFER_STDSUB)
|
||||
cmd.info.type = SMU_I2C_TRANSFER_COMBINED;
|
||||
|
||||
/* Finish filling command and submit it */
|
||||
cmd.done = smu_i2c_done;
|
||||
cmd.misc = iface;
|
||||
rc = smu_queue_i2c(&cmd);
|
||||
if (rc < 0)
|
||||
return rc;
|
||||
wait_for_completion(&iface->complete);
|
||||
rc = cmd.status;
|
||||
|
||||
if (!read || rc < 0)
|
||||
return rc;
|
||||
|
||||
switch (size) {
|
||||
case I2C_SMBUS_BYTE:
|
||||
case I2C_SMBUS_BYTE_DATA:
|
||||
data->byte = cmd.info.data[0];
|
||||
break;
|
||||
case I2C_SMBUS_WORD_DATA:
|
||||
data->word = ((u16)cmd.info.data[1]) << 8;
|
||||
data->word |= cmd.info.data[0];
|
||||
break;
|
||||
/* Note that these are broken vs. the expected smbus API where
|
||||
* on reads, the length is actually returned from the function,
|
||||
* but I think the current API makes no sense and I don't want
|
||||
* any driver that I haven't verified for correctness to go
|
||||
* anywhere near a pmac i2c bus anyway ...
|
||||
*/
|
||||
case I2C_SMBUS_BLOCK_DATA:
|
||||
case I2C_SMBUS_I2C_BLOCK_DATA:
|
||||
memcpy(&data->block[0], cmd.info.data, cmd.info.datalen);
|
||||
break;
|
||||
}
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
static u32
|
||||
smu_smbus_func(struct i2c_adapter * adapter)
|
||||
{
|
||||
return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE |
|
||||
I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA |
|
||||
I2C_FUNC_SMBUS_BLOCK_DATA;
|
||||
}
|
||||
|
||||
/* For now, we only handle combined mode (smbus) */
|
||||
static struct i2c_algorithm smu_algorithm = {
|
||||
.smbus_xfer = smu_smbus_xfer,
|
||||
.functionality = smu_smbus_func,
|
||||
};
|
||||
|
||||
static int create_iface(struct device_node *np, struct device *dev)
|
||||
{
|
||||
struct smu_iface* iface;
|
||||
u32 *reg, busid;
|
||||
int rc;
|
||||
|
||||
reg = (u32 *)get_property(np, "reg", NULL);
|
||||
if (reg == NULL) {
|
||||
printk(KERN_ERR "i2c-pmac-smu: can't find bus number !\n");
|
||||
return -ENXIO;
|
||||
}
|
||||
busid = *reg;
|
||||
|
||||
iface = kmalloc(sizeof(struct smu_iface), GFP_KERNEL);
|
||||
if (iface == NULL) {
|
||||
printk(KERN_ERR "i2c-pmac-smu: can't allocate inteface !\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
memset(iface, 0, sizeof(struct smu_iface));
|
||||
init_completion(&iface->complete);
|
||||
iface->busid = busid;
|
||||
|
||||
dev_set_drvdata(dev, iface);
|
||||
|
||||
sprintf(iface->adapter.name, "smu-i2c-%02x", busid);
|
||||
iface->adapter.algo = &smu_algorithm;
|
||||
iface->adapter.algo_data = NULL;
|
||||
iface->adapter.client_register = NULL;
|
||||
iface->adapter.client_unregister = NULL;
|
||||
i2c_set_adapdata(&iface->adapter, iface);
|
||||
iface->adapter.dev.parent = dev;
|
||||
|
||||
rc = i2c_add_adapter(&iface->adapter);
|
||||
if (rc) {
|
||||
printk(KERN_ERR "i2c-pamc-smu.c: Adapter %s registration "
|
||||
"failed\n", iface->adapter.name);
|
||||
i2c_set_adapdata(&iface->adapter, NULL);
|
||||
}
|
||||
|
||||
if (probe) {
|
||||
unsigned char addr;
|
||||
printk("Probe: ");
|
||||
for (addr = 0x00; addr <= 0x7f; addr++) {
|
||||
if (i2c_smbus_xfer(&iface->adapter,addr,
|
||||
0,0,0,I2C_SMBUS_QUICK,NULL) >= 0)
|
||||
printk("%02x ", addr);
|
||||
}
|
||||
printk("\n");
|
||||
}
|
||||
|
||||
printk(KERN_INFO "SMU i2c bus %x registered\n", busid);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int dispose_iface(struct device *dev)
|
||||
{
|
||||
struct smu_iface *iface = dev_get_drvdata(dev);
|
||||
int rc;
|
||||
|
||||
rc = i2c_del_adapter(&iface->adapter);
|
||||
i2c_set_adapdata(&iface->adapter, NULL);
|
||||
/* We aren't that prepared to deal with this... */
|
||||
if (rc)
|
||||
printk("i2c-pmac-smu.c: Failed to remove bus %s !\n",
|
||||
iface->adapter.name);
|
||||
dev_set_drvdata(dev, NULL);
|
||||
kfree(iface);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
static int create_iface_of_platform(struct of_device* dev,
|
||||
const struct of_device_id *match)
|
||||
{
|
||||
return create_iface(dev->node, &dev->dev);
|
||||
}
|
||||
|
||||
|
||||
static int dispose_iface_of_platform(struct of_device* dev)
|
||||
{
|
||||
return dispose_iface(&dev->dev);
|
||||
}
|
||||
|
||||
|
||||
static struct of_device_id i2c_smu_match[] =
|
||||
{
|
||||
{
|
||||
.compatible = "smu-i2c",
|
||||
},
|
||||
{},
|
||||
};
|
||||
static struct of_platform_driver i2c_smu_of_platform_driver =
|
||||
{
|
||||
.name = "i2c-smu",
|
||||
.match_table = i2c_smu_match,
|
||||
.probe = create_iface_of_platform,
|
||||
.remove = dispose_iface_of_platform
|
||||
};
|
||||
|
||||
|
||||
static int __init i2c_pmac_smu_init(void)
|
||||
{
|
||||
of_register_driver(&i2c_smu_of_platform_driver);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
static void __exit i2c_pmac_smu_cleanup(void)
|
||||
{
|
||||
of_unregister_driver(&i2c_smu_of_platform_driver);
|
||||
}
|
||||
|
||||
module_init(i2c_pmac_smu_init);
|
||||
module_exit(i2c_pmac_smu_cleanup);
|
|
@ -308,6 +308,7 @@ static struct input_device_id *input_match_device(struct input_device_id *id, st
|
|||
MATCH_BIT(ledbit, LED_MAX);
|
||||
MATCH_BIT(sndbit, SND_MAX);
|
||||
MATCH_BIT(ffbit, FF_MAX);
|
||||
MATCH_BIT(swbit, SW_MAX);
|
||||
|
||||
return id;
|
||||
}
|
||||
|
|
|
@ -209,9 +209,7 @@ static void st5481B_mode(struct st5481_bcs *bcs, int mode)
|
|||
bcs->mode = mode;
|
||||
|
||||
// Cancel all USB transfers on this B channel
|
||||
b_out->urb[0]->transfer_flags |= URB_ASYNC_UNLINK;
|
||||
usb_unlink_urb(b_out->urb[0]);
|
||||
b_out->urb[1]->transfer_flags |= URB_ASYNC_UNLINK;
|
||||
usb_unlink_urb(b_out->urb[1]);
|
||||
b_out->busy = 0;
|
||||
|
||||
|
|
|
@ -645,9 +645,7 @@ void st5481_in_mode(struct st5481_in *in, int mode)
|
|||
|
||||
in->mode = mode;
|
||||
|
||||
in->urb[0]->transfer_flags |= URB_ASYNC_UNLINK;
|
||||
usb_unlink_urb(in->urb[0]);
|
||||
in->urb[1]->transfer_flags |= URB_ASYNC_UNLINK;
|
||||
usb_unlink_urb(in->urb[1]);
|
||||
|
||||
if (in->mode != L1_MODE_NULL) {
|
||||
|
|
File diff suppressed because it is too large
@ -599,7 +599,7 @@ thermostat_init(void)
|
|||
sensor_location[2] = "?";
|
||||
}
|
||||
|
||||
of_dev = of_platform_device_create(np, "temperatures");
|
||||
of_dev = of_platform_device_create(np, "temperatures", NULL);
|
||||
|
||||
if (of_dev == NULL) {
|
||||
printk(KERN_ERR "Can't register temperatures device !\n");
|
||||
|
|
|
@ -2051,7 +2051,7 @@ static int __init therm_pm72_init(void)
|
|||
return -ENODEV;
|
||||
}
|
||||
}
|
||||
of_dev = of_platform_device_create(np, "temperature");
|
||||
of_dev = of_platform_device_create(np, "temperature", NULL);
|
||||
if (of_dev == NULL) {
|
||||
printk(KERN_ERR "Can't register FCU platform device !\n");
|
||||
return -ENODEV;
|
||||
|
|
|
@ -504,7 +504,7 @@ g4fan_init( void )
|
|||
}
|
||||
if( !(np=of_find_node_by_name(NULL, "fan")) )
|
||||
return -ENODEV;
|
||||
x.of_dev = of_platform_device_create( np, "temperature" );
|
||||
x.of_dev = of_platform_device_create(np, "temperature", NULL);
|
||||
of_node_put( np );
|
||||
|
||||
if( !x.of_dev ) {
|
||||
|
|
|
@ -763,21 +763,21 @@ static void set_pll(struct bttv *btv)
|
|||
/* no PLL needed */
|
||||
if (btv->pll.pll_current == 0)
|
||||
return;
|
||||
vprintk(KERN_INFO "bttv%d: PLL can sleep, using XTAL (%d).\n",
|
||||
btv->c.nr,btv->pll.pll_ifreq);
|
||||
bttv_printk(KERN_INFO "bttv%d: PLL can sleep, using XTAL (%d).\n",
|
||||
btv->c.nr,btv->pll.pll_ifreq);
|
||||
btwrite(0x00,BT848_TGCTRL);
|
||||
btwrite(0x00,BT848_PLL_XCI);
|
||||
btv->pll.pll_current = 0;
|
||||
return;
|
||||
}
|
||||
|
||||
vprintk(KERN_INFO "bttv%d: PLL: %d => %d ",btv->c.nr,
|
||||
btv->pll.pll_ifreq, btv->pll.pll_ofreq);
|
||||
bttv_printk(KERN_INFO "bttv%d: PLL: %d => %d ",btv->c.nr,
|
||||
btv->pll.pll_ifreq, btv->pll.pll_ofreq);
|
||||
set_pll_freq(btv, btv->pll.pll_ifreq, btv->pll.pll_ofreq);
|
||||
|
||||
for (i=0; i<10; i++) {
|
||||
/* Let other people run while the PLL stabilizes */
|
||||
vprintk(".");
|
||||
bttv_printk(".");
|
||||
msleep(10);
|
||||
|
||||
if (btread(BT848_DSTATUS) & BT848_DSTATUS_PLOCK) {
|
||||
|
@ -785,12 +785,12 @@ static void set_pll(struct bttv *btv)
|
|||
} else {
|
||||
btwrite(0x08,BT848_TGCTRL);
|
||||
btv->pll.pll_current = btv->pll.pll_ofreq;
|
||||
vprintk(" ok\n");
|
||||
bttv_printk(" ok\n");
|
||||
return;
|
||||
}
|
||||
}
|
||||
btv->pll.pll_current = -1;
|
||||
vprintk("failed\n");
|
||||
bttv_printk("failed\n");
|
||||
return;
|
||||
}
|
||||
|
||||
|
|
|
@ -221,7 +221,7 @@ extern void bttv_gpio_tracking(struct bttv *btv, char *comment);
|
|||
extern int init_bttv_i2c(struct bttv *btv);
|
||||
extern int fini_bttv_i2c(struct bttv *btv);
|
||||
|
||||
#define vprintk if (bttv_verbose) printk
|
||||
#define bttv_printk if (bttv_verbose) printk
|
||||
#define dprintk if (bttv_debug >= 1) printk
|
||||
#define d2printk if (bttv_debug >= 2) printk
|
||||
|
||||
|
|
|
@@ -48,8 +48,8 @@ struct ucb1x00_ts {
 	u16 x_res;
 	u16 y_res;
 
-	int restart:1;
-	int adcsync:1;
+	unsigned int restart:1;
+	unsigned int adcsync:1;
 };
 
 static int adcsync;
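The ucb1x00-ts hunk above is about bit-field signedness: a signed one-bit bit-field can only represent 0 and -1, so storing 1 in it and later comparing against 1 does not behave as intended, while the unsigned variant holds 0 and 1. A small stand-alone illustration (not driver code):

    #include <stdio.h>

    struct flags {
            int s:1;            /* signed 1-bit field: values 0 and -1 */
            unsigned int u:1;   /* unsigned 1-bit field: values 0 and 1 */
    };

    int main(void)
    {
            struct flags f = { .s = 1, .u = 1 };

            printf("%d %u\n", f.s, f.u);   /* typically prints "-1 1" */
            return 0;
    }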
@ -40,7 +40,7 @@
|
|||
#include <linux/mtd/mtd.h>
|
||||
#include <linux/mtd/doc2000.h>
|
||||
|
||||
#define DEBUG 0
|
||||
#define DEBUG_ECC 0
|
||||
/* need to undef it (from asm/termbits.h) */
|
||||
#undef B0
|
||||
|
||||
|
@ -249,7 +249,7 @@ eras_dec_rs(dtype Alpha_to[NN + 1], dtype Index_of[NN + 1],
|
|||
lambda[j] ^= Alpha_to[modnn(u + tmp)];
|
||||
}
|
||||
}
|
||||
#if DEBUG >= 1
|
||||
#if DEBUG_ECC >= 1
|
||||
/* Test code that verifies the erasure locator polynomial just constructed
|
||||
Needed only for decoder debugging. */
|
||||
|
||||
|
@ -276,7 +276,7 @@ eras_dec_rs(dtype Alpha_to[NN + 1], dtype Index_of[NN + 1],
|
|||
count = -1;
|
||||
goto finish;
|
||||
}
|
||||
#if DEBUG >= 2
|
||||
#if DEBUG_ECC >= 2
|
||||
printf("\n Erasure positions as determined by roots of Eras Loc Poly:\n");
|
||||
for (i = 0; i < count; i++)
|
||||
printf("%d ", loc[i]);
|
||||
|
@ -409,7 +409,7 @@ eras_dec_rs(dtype Alpha_to[NN + 1], dtype Index_of[NN + 1],
|
|||
den ^= Alpha_to[modnn(lambda[i+1] + i * root[j])];
|
||||
}
|
||||
if (den == 0) {
|
||||
#if DEBUG >= 1
|
||||
#if DEBUG_ECC >= 1
|
||||
printf("\n ERROR: denominator = 0\n");
|
||||
#endif
|
||||
/* Convert to dual basis */
|
||||
|
|
|
@@ -1094,7 +1094,7 @@ static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
 
 	outb_p(E8390_NODMA+E8390_PAGE0, e8390_base+E8390_CMD);
 
-	if (inb_p(e8390_base) & E8390_TRANS)
+	if (inb_p(e8390_base + E8390_CMD) & E8390_TRANS)
 	{
 		printk(KERN_WARNING "%s: trigger_send() called with the transmitter busy.\n",
 			dev->name);
@@ -1653,7 +1653,8 @@ static int bond_enslave(struct net_device *bond_dev, struct net_device *slave_de
 	int old_features = bond_dev->features;
 	int res = 0;
 
-	if (slave_dev->do_ioctl == NULL) {
+	if (!bond->params.use_carrier && slave_dev->ethtool_ops == NULL &&
+	    slave_dev->do_ioctl == NULL) {
 		printk(KERN_WARNING DRV_NAME
 			": Warning : no link monitoring support for %s\n",
 			slave_dev->name);
@@ -100,11 +100,11 @@ VERSION 2.2LK <2005/01/25>
 
 #ifdef CONFIG_R8169_NAPI
 #define rtl8169_rx_skb netif_receive_skb
-#define rtl8169_rx_hwaccel_skb vlan_hwaccel_rx
+#define rtl8169_rx_hwaccel_skb vlan_hwaccel_receive_skb
 #define rtl8169_rx_quota(count, quota) min(count, quota)
 #else
 #define rtl8169_rx_skb netif_rx
-#define rtl8169_rx_hwaccel_skb vlan_hwaccel_receive_skb
+#define rtl8169_rx_hwaccel_skb vlan_hwaccel_rx
 #define rtl8169_rx_quota(count, quota) count
 #endif
 
@ -42,7 +42,7 @@
|
|||
#include "skge.h"
|
||||
|
||||
#define DRV_NAME "skge"
|
||||
#define DRV_VERSION "1.0"
|
||||
#define DRV_VERSION "1.1"
|
||||
#define PFX DRV_NAME " "
|
||||
|
||||
#define DEFAULT_TX_RING_SIZE 128
|
||||
|
@ -105,41 +105,28 @@ static const u32 rxirqmask[] = { IS_R1_F, IS_R2_F };
|
|||
static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F };
|
||||
static const u32 portirqmask[] = { IS_PORT_1, IS_PORT_2 };
|
||||
|
||||
/* Don't need to look at whole 16K.
|
||||
* last interesting register is descriptor poll timer.
|
||||
*/
|
||||
#define SKGE_REGS_LEN (29*128)
|
||||
|
||||
static int skge_get_regs_len(struct net_device *dev)
|
||||
{
|
||||
return SKGE_REGS_LEN;
|
||||
return 0x4000;
|
||||
}
|
||||
|
||||
/*
|
||||
* Returns copy of control register region
|
||||
* I/O region is divided into banks and certain regions are unreadable
|
||||
* Returns copy of whole control register region
|
||||
* Note: skip RAM address register because accessing it will
|
||||
* cause bus hangs!
|
||||
*/
|
||||
static void skge_get_regs(struct net_device *dev, struct ethtool_regs *regs,
|
||||
void *p)
|
||||
{
|
||||
const struct skge_port *skge = netdev_priv(dev);
|
||||
unsigned long offs;
|
||||
const void __iomem *io = skge->hw->regs;
|
||||
static const unsigned long bankmap
|
||||
= (1<<0) | (1<<2) | (1<<8) | (1<<9)
|
||||
| (1<<12) | (1<<13) | (1<<14) | (1<<15) | (1<<16)
|
||||
| (1<<17) | (1<<20) | (1<<21) | (1<<22) | (1<<23)
|
||||
| (1<<24) | (1<<25) | (1<<26) | (1<<27) | (1<<28);
|
||||
|
||||
regs->version = 1;
|
||||
for (offs = 0; offs < regs->len; offs += 128) {
|
||||
u32 len = min_t(u32, 128, regs->len - offs);
|
||||
memset(p, 0, regs->len);
|
||||
memcpy_fromio(p, io, B3_RAM_ADDR);
|
||||
|
||||
if (bankmap & (1<<(offs/128)))
|
||||
memcpy_fromio(p + offs, io + offs, len);
|
||||
else
|
||||
memset(p + offs, 0, len);
|
||||
}
|
||||
memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1,
|
||||
regs->len - B3_RI_WTO_R1);
|
||||
}
|
||||
|
||||
/* Wake on LAN only supported on Yukon chips with rev 1 or above */
|
||||
|
@ -775,17 +762,6 @@ static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u64 base)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static struct sk_buff *skge_rx_alloc(struct net_device *dev, unsigned int size)
|
||||
{
|
||||
struct sk_buff *skb = dev_alloc_skb(size);
|
||||
|
||||
if (likely(skb)) {
|
||||
skb->dev = dev;
|
||||
skb_reserve(skb, NET_IP_ALIGN);
|
||||
}
|
||||
return skb;
|
||||
}
|
||||
|
||||
/* Allocate and setup a new buffer for receiving */
|
||||
static void skge_rx_setup(struct skge_port *skge, struct skge_element *e,
|
||||
struct sk_buff *skb, unsigned int bufsize)
|
||||
|
@ -858,16 +834,17 @@ static int skge_rx_fill(struct skge_port *skge)
|
|||
{
|
||||
struct skge_ring *ring = &skge->rx_ring;
|
||||
struct skge_element *e;
|
||||
unsigned int bufsize = skge->rx_buf_size;
|
||||
|
||||
e = ring->start;
|
||||
do {
|
||||
struct sk_buff *skb = skge_rx_alloc(skge->netdev, bufsize);
|
||||
struct sk_buff *skb;
|
||||
|
||||
skb = dev_alloc_skb(skge->rx_buf_size + NET_IP_ALIGN);
|
||||
if (!skb)
|
||||
return -ENOMEM;
|
||||
|
||||
skge_rx_setup(skge, e, skb, bufsize);
|
||||
skb_reserve(skb, NET_IP_ALIGN);
|
||||
skge_rx_setup(skge, e, skb, skge->rx_buf_size);
|
||||
} while ( (e = e->next) != ring->start);
|
||||
|
||||
ring->to_clean = ring->start;
|
||||
|
@ -1666,6 +1643,22 @@ static void yukon_reset(struct skge_hw *hw, int port)
|
|||
| GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
|
||||
}
|
||||
|
||||
/* Apparently, early versions of Yukon-Lite had wrong chip_id? */
|
||||
static int is_yukon_lite_a0(struct skge_hw *hw)
|
||||
{
|
||||
u32 reg;
|
||||
int ret;
|
||||
|
||||
if (hw->chip_id != CHIP_ID_YUKON)
|
||||
return 0;
|
||||
|
||||
reg = skge_read32(hw, B2_FAR);
|
||||
skge_write8(hw, B2_FAR + 3, 0xff);
|
||||
ret = (skge_read8(hw, B2_FAR + 3) != 0);
|
||||
skge_write32(hw, B2_FAR, reg);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void yukon_mac_init(struct skge_hw *hw, int port)
|
||||
{
|
||||
struct skge_port *skge = netdev_priv(hw->dev[port]);
|
||||
|
@ -1781,9 +1774,11 @@ static void yukon_mac_init(struct skge_hw *hw, int port)
|
|||
/* Configure Rx MAC FIFO */
|
||||
skge_write16(hw, SK_REG(port, RX_GMF_FL_MSK), RX_FF_FL_DEF_MSK);
|
||||
reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
|
||||
if (hw->chip_id == CHIP_ID_YUKON_LITE &&
|
||||
hw->chip_rev >= CHIP_REV_YU_LITE_A3)
|
||||
|
||||
/* disable Rx GMAC FIFO Flush for YUKON-Lite Rev. A0 only */
|
||||
if (is_yukon_lite_a0(hw))
|
||||
reg &= ~GMF_RX_F_FL_ON;
|
||||
|
||||
skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
|
||||
skge_write16(hw, SK_REG(port, RX_GMF_CTRL_T), reg);
|
||||
/*
|
||||
|
@ -2442,6 +2437,14 @@ static void yukon_set_multicast(struct net_device *dev)
|
|||
gma_write16(hw, port, GM_RX_CTRL, reg);
|
||||
}
|
||||
|
||||
static inline u16 phy_length(const struct skge_hw *hw, u32 status)
|
||||
{
|
||||
if (hw->chip_id == CHIP_ID_GENESIS)
|
||||
return status >> XMR_FS_LEN_SHIFT;
|
||||
else
|
||||
return status >> GMR_FS_LEN_SHIFT;
|
||||
}
|
||||
|
||||
static inline int bad_phy_status(const struct skge_hw *hw, u32 status)
|
||||
{
|
||||
if (hw->chip_id == CHIP_ID_GENESIS)
|
||||
|
@ -2451,16 +2454,81 @@ static inline int bad_phy_status(const struct skge_hw *hw, u32 status)
|
|||
(status & GMR_FS_RX_OK) == 0;
|
||||
}
|
||||
|
||||
static void skge_rx_error(struct skge_port *skge, int slot,
|
||||
u32 control, u32 status)
|
||||
|
||||
/* Get receive buffer from descriptor.
|
||||
* Handles copy of small buffers and reallocation failures
|
||||
*/
|
||||
static inline struct sk_buff *skge_rx_get(struct skge_port *skge,
|
||||
struct skge_element *e,
|
||||
u32 control, u32 status, u16 csum)
|
||||
{
|
||||
if (netif_msg_rx_err(skge))
|
||||
printk(KERN_DEBUG PFX "%s: rx err, slot %d control 0x%x status 0x%x\n",
|
||||
skge->netdev->name, slot, control, status);
|
||||
struct sk_buff *skb;
|
||||
u16 len = control & BMU_BBC;
|
||||
|
||||
if (unlikely(netif_msg_rx_status(skge)))
|
||||
printk(KERN_DEBUG PFX "%s: rx slot %td status 0x%x len %d\n",
|
||||
skge->netdev->name, e - skge->rx_ring.start,
|
||||
status, len);
|
||||
|
||||
if (len > skge->rx_buf_size)
|
||||
goto error;
|
||||
|
||||
if ((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF))
|
||||
skge->net_stats.rx_length_errors++;
|
||||
else if (skge->hw->chip_id == CHIP_ID_GENESIS) {
|
||||
goto error;
|
||||
|
||||
if (bad_phy_status(skge->hw, status))
|
||||
goto error;
|
||||
|
||||
if (phy_length(skge->hw, status) != len)
|
||||
goto error;
|
||||
|
||||
if (len < RX_COPY_THRESHOLD) {
|
||||
skb = dev_alloc_skb(len + 2);
|
||||
if (!skb)
|
||||
goto resubmit;
|
||||
|
||||
skb_reserve(skb, 2);
|
||||
pci_dma_sync_single_for_cpu(skge->hw->pdev,
|
||||
pci_unmap_addr(e, mapaddr),
|
||||
len, PCI_DMA_FROMDEVICE);
|
||||
memcpy(skb->data, e->skb->data, len);
|
||||
pci_dma_sync_single_for_device(skge->hw->pdev,
|
||||
pci_unmap_addr(e, mapaddr),
|
||||
len, PCI_DMA_FROMDEVICE);
|
||||
skge_rx_reuse(e, skge->rx_buf_size);
|
||||
} else {
|
||||
struct sk_buff *nskb;
|
||||
nskb = dev_alloc_skb(skge->rx_buf_size + NET_IP_ALIGN);
|
||||
if (!nskb)
|
||||
goto resubmit;
|
||||
|
||||
pci_unmap_single(skge->hw->pdev,
|
||||
pci_unmap_addr(e, mapaddr),
|
||||
pci_unmap_len(e, maplen),
|
||||
PCI_DMA_FROMDEVICE);
|
||||
skb = e->skb;
|
||||
prefetch(skb->data);
|
||||
skge_rx_setup(skge, e, nskb, skge->rx_buf_size);
|
||||
}
|
||||
|
||||
skb_put(skb, len);
|
||||
skb->dev = skge->netdev;
|
||||
if (skge->rx_csum) {
|
||||
skb->csum = csum;
|
||||
skb->ip_summed = CHECKSUM_HW;
|
||||
}
|
||||
|
||||
skb->protocol = eth_type_trans(skb, skge->netdev);
|
||||
|
||||
return skb;
|
||||
error:
|
||||
|
||||
if (netif_msg_rx_err(skge))
|
||||
printk(KERN_DEBUG PFX "%s: rx err, slot %td control 0x%x status 0x%x\n",
|
||||
skge->netdev->name, e - skge->rx_ring.start,
|
||||
control, status);
|
||||
|
||||
if (skge->hw->chip_id == CHIP_ID_GENESIS) {
|
||||
if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR))
|
||||
skge->net_stats.rx_length_errors++;
|
||||
if (status & XMR_FS_FRA_ERR)
|
||||
|
@ -2475,56 +2543,10 @@ static void skge_rx_error(struct skge_port *skge, int slot,
|
|||
if (status & GMR_FS_CRC_ERR)
|
||||
skge->net_stats.rx_crc_errors++;
|
||||
}
|
||||
}
|
||||
|
||||
/* Get receive buffer from descriptor.
|
||||
* Handles copy of small buffers and reallocation failures
|
||||
*/
|
||||
static inline struct sk_buff *skge_rx_get(struct skge_port *skge,
|
||||
struct skge_element *e,
|
||||
unsigned int len)
|
||||
{
|
||||
struct sk_buff *nskb, *skb;
|
||||
|
||||
if (len < RX_COPY_THRESHOLD) {
|
||||
nskb = skge_rx_alloc(skge->netdev, len + NET_IP_ALIGN);
|
||||
if (unlikely(!nskb))
|
||||
return NULL;
|
||||
|
||||
pci_dma_sync_single_for_cpu(skge->hw->pdev,
|
||||
pci_unmap_addr(e, mapaddr),
|
||||
len, PCI_DMA_FROMDEVICE);
|
||||
memcpy(nskb->data, e->skb->data, len);
|
||||
pci_dma_sync_single_for_device(skge->hw->pdev,
|
||||
pci_unmap_addr(e, mapaddr),
|
||||
len, PCI_DMA_FROMDEVICE);
|
||||
|
||||
if (skge->rx_csum) {
|
||||
struct skge_rx_desc *rd = e->desc;
|
||||
nskb->csum = le16_to_cpu(rd->csum2);
|
||||
nskb->ip_summed = CHECKSUM_HW;
|
||||
}
|
||||
skge_rx_reuse(e, skge->rx_buf_size);
|
||||
return nskb;
|
||||
} else {
|
||||
nskb = skge_rx_alloc(skge->netdev, skge->rx_buf_size);
|
||||
if (unlikely(!nskb))
|
||||
return NULL;
|
||||
|
||||
pci_unmap_single(skge->hw->pdev,
|
||||
pci_unmap_addr(e, mapaddr),
|
||||
pci_unmap_len(e, maplen),
|
||||
PCI_DMA_FROMDEVICE);
|
||||
skb = e->skb;
|
||||
if (skge->rx_csum) {
|
||||
struct skge_rx_desc *rd = e->desc;
|
||||
skb->csum = le16_to_cpu(rd->csum2);
|
||||
skb->ip_summed = CHECKSUM_HW;
|
||||
}
|
||||
|
||||
skge_rx_setup(skge, e, nskb, skge->rx_buf_size);
|
||||
return skb;
|
||||
}
|
||||
resubmit:
|
||||
skge_rx_reuse(e, skge->rx_buf_size);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
||||
|
@ -2540,32 +2562,16 @@ static int skge_poll(struct net_device *dev, int *budget)
|
|||
for (e = ring->to_clean; work_done < to_do; e = e->next) {
|
||||
struct skge_rx_desc *rd = e->desc;
|
||||
struct sk_buff *skb;
|
||||
u32 control, len, status;
|
||||
u32 control;
|
||||
|
||||
rmb();
|
||||
control = rd->control;
|
||||
if (control & BMU_OWN)
|
||||
break;
|
||||
|
||||
len = control & BMU_BBC;
|
||||
status = rd->status;
|
||||
|
||||
if (unlikely((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF)
|
||||
|| bad_phy_status(hw, status))) {
|
||||
skge_rx_error(skge, e - ring->start, control, status);
|
||||
skge_rx_reuse(e, skge->rx_buf_size);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (netif_msg_rx_status(skge))
|
||||
printk(KERN_DEBUG PFX "%s: rx slot %td status 0x%x len %d\n",
|
||||
dev->name, e - ring->start, rd->status, len);
|
||||
|
||||
skb = skge_rx_get(skge, e, len);
|
||||
skb = skge_rx_get(skge, e, control, rd->status,
|
||||
le16_to_cpu(rd->csum2));
|
||||
if (likely(skb)) {
|
||||
skb_put(skb, len);
|
||||
skb->protocol = eth_type_trans(skb, dev);
|
||||
|
||||
dev->last_rx = jiffies;
|
||||
netif_receive_skb(skb);
|
||||
|
||||
|
|
|
@ -953,6 +953,7 @@ enum {
|
|||
*/
|
||||
enum {
|
||||
XMR_FS_LEN = 0x3fff<<18, /* Bit 31..18: Rx Frame Length */
|
||||
XMR_FS_LEN_SHIFT = 18,
|
||||
XMR_FS_2L_VLAN = 1<<17, /* Bit 17: tagged wh 2Lev VLAN ID*/
|
||||
XMR_FS_1_VLAN = 1<<16, /* Bit 16: tagged wh 1ev VLAN ID*/
|
||||
XMR_FS_BC = 1<<15, /* Bit 15: Broadcast Frame */
|
||||
|
@ -1868,6 +1869,7 @@ enum {
|
|||
/* Receive Frame Status Encoding */
|
||||
enum {
|
||||
GMR_FS_LEN = 0xffff<<16, /* Bit 31..16: Rx Frame Length */
|
||||
GMR_FS_LEN_SHIFT = 16,
|
||||
GMR_FS_VLAN = 1<<13, /* Bit 13: VLAN Packet */
|
||||
GMR_FS_JABBER = 1<<12, /* Bit 12: Jabber Packet */
|
||||
GMR_FS_UN_SIZE = 1<<11, /* Bit 11: Undersize Packet */
|
||||
|
|
|
@ -7,7 +7,6 @@ int pci_hotplug (struct device *dev, char **envp, int num_envp,
|
|||
char *buffer, int buffer_size)
|
||||
{
|
||||
struct pci_dev *pdev;
|
||||
char *scratch;
|
||||
int i = 0;
|
||||
int length = 0;
|
||||
|
||||
|
@ -18,9 +17,6 @@ int pci_hotplug (struct device *dev, char **envp, int num_envp,
|
|||
if (!pdev)
|
||||
return -ENODEV;
|
||||
|
||||
scratch = buffer;
|
||||
|
||||
|
||||
if (add_hotplug_env_var(envp, num_envp, &i,
|
||||
buffer, buffer_size, &length,
|
||||
"PCI_CLASS=%04X", pdev->class))
|
||||
|
|
|
@@ -62,7 +62,7 @@ static ssize_t add_slot_store(struct dlpar_io_attr *dlpar_attr,
 	char drc_name[MAX_DRC_NAME_LEN];
 	char *end;
 
-	if (nbytes > MAX_DRC_NAME_LEN)
+	if (nbytes >= MAX_DRC_NAME_LEN)
 		return 0;
 
 	memcpy(drc_name, buf, nbytes);
@@ -83,7 +83,7 @@ static ssize_t remove_slot_store(struct dlpar_io_attr *dlpar_attr,
 	char drc_name[MAX_DRC_NAME_LEN];
 	char *end;
 
-	if (nbytes > MAX_DRC_NAME_LEN)
+	if (nbytes >= MAX_DRC_NAME_LEN)
 		return 0;
 
 	memcpy(drc_name, buf, nbytes);
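The rpadlpar_sysfs hunks above tighten the length check from '>' to '>='. drc_name holds MAX_DRC_NAME_LEN bytes and the copied string still has to be terminated after the memcpy, so nbytes must stay strictly below the buffer size. A hedged sketch of the reasoning with made-up names:

    #include <string.h>

    #define NAME_LEN 16     /* stand-in for MAX_DRC_NAME_LEN */

    static int parse_name(const char *buf, size_t nbytes)
    {
            char name[NAME_LEN];

            if (nbytes >= NAME_LEN) /* '>' would admit nbytes == NAME_LEN,
                                     * leaving no room for the terminator */
                    return -1;

            memcpy(name, buf, nbytes);
            name[nbytes] = '\0';    /* this write is what '>=' protects */
            return name[0] != '\0' ? 0 : -1;
    }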
@@ -159,7 +159,7 @@ static int sn_hp_slot_private_alloc(struct hotplug_slot *bss_hotplug_slot,
 
 	pcibus_info = SN_PCIBUS_BUSSOFT_INFO(pci_bus);
 
-	slot = kcalloc(1, sizeof(*slot), GFP_KERNEL);
+	slot = kzalloc(sizeof(*slot), GFP_KERNEL);
 	if (!slot)
 		return -ENOMEM;
 	bss_hotplug_slot->private = slot;
@@ -491,7 +491,7 @@ static int sn_hotplug_slot_register(struct pci_bus *pci_bus)
 		if (sn_pci_slot_valid(pci_bus, device) != 1)
 			continue;
 
-		bss_hotplug_slot = kcalloc(1, sizeof(*bss_hotplug_slot),
+		bss_hotplug_slot = kzalloc(sizeof(*bss_hotplug_slot),
 					   GFP_KERNEL);
 		if (!bss_hotplug_slot) {
 			rc = -ENOMEM;
@@ -499,7 +499,7 @@ static int sn_hotplug_slot_register(struct pci_bus *pci_bus)
 		}
 
 		bss_hotplug_slot->info =
-			kcalloc(1, sizeof(struct hotplug_slot_info),
+			kzalloc(sizeof(struct hotplug_slot_info),
 				GFP_KERNEL);
 		if (!bss_hotplug_slot->info) {
 			rc = -ENOMEM;
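The kcalloc(1, ...) to kzalloc(...) conversions in these hotplug hunks are behaviour-preserving: both return zeroed memory, kzalloc simply drops the redundant element count. Roughly, for a single object (the struct name here is hypothetical):

    #include <linux/slab.h>

    struct foo { int a; };  /* hypothetical */

    static struct foo *alloc_foo(void)
    {
            /* kzalloc(size, flags) is equivalent to kcalloc(1, size, flags):
             * both return zero-initialised memory or NULL. */
            return kzalloc(sizeof(struct foo), GFP_KERNEL);
    }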
@@ -360,7 +360,7 @@ pci_create_resource_files(struct pci_dev *pdev)
 			continue;
 
 		/* allocate attribute structure, piggyback attribute name */
-		res_attr = kcalloc(1, sizeof(*res_attr) + 10, GFP_ATOMIC);
+		res_attr = kzalloc(sizeof(*res_attr) + 10, GFP_ATOMIC);
 		if (res_attr) {
 			char *res_attr_name = (char *)(res_attr + 1);
 
@ -165,7 +165,7 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
|
|||
if (l == 0xffffffff)
|
||||
l = 0;
|
||||
if ((l & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_MEMORY) {
|
||||
sz = pci_size(l, sz, PCI_BASE_ADDRESS_MEM_MASK);
|
||||
sz = pci_size(l, sz, (u32)PCI_BASE_ADDRESS_MEM_MASK);
|
||||
if (!sz)
|
||||
continue;
|
||||
res->start = l & PCI_BASE_ADDRESS_MEM_MASK;
|
||||
|
@ -215,7 +215,7 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
|
|||
if (l == 0xffffffff)
|
||||
l = 0;
|
||||
if (sz && sz != 0xffffffff) {
|
||||
sz = pci_size(l, sz, PCI_ROM_ADDRESS_MASK);
|
||||
sz = pci_size(l, sz, (u32)PCI_ROM_ADDRESS_MASK);
|
||||
if (sz) {
|
||||
res->flags = (l & IORESOURCE_ROM_ENABLE) |
|
||||
IORESOURCE_MEM | IORESOURCE_PREFETCH |
|
||||
|
@ -402,6 +402,12 @@ static void pci_enable_crs(struct pci_dev *dev)
|
|||
static void __devinit pci_fixup_parent_subordinate_busnr(struct pci_bus *child, int max)
|
||||
{
|
||||
struct pci_bus *parent = child->parent;
|
||||
|
||||
/* Attempts to fix that up are really dangerous unless
|
||||
we're going to re-assign all bus numbers. */
|
||||
if (!pcibios_assign_all_busses())
|
||||
return;
|
||||
|
||||
while (parent->parent && parent->subordinate < max) {
|
||||
parent->subordinate = max;
|
||||
pci_write_config_byte(parent->self, PCI_SUBORDINATE_BUS, max);
|
||||
|
@ -478,8 +484,18 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev * dev, int max
|
|||
* We need to assign a number to this bus which we always
|
||||
* do in the second pass.
|
||||
*/
|
||||
if (!pass)
|
||||
if (!pass) {
|
||||
if (pcibios_assign_all_busses())
|
||||
/* Temporarily disable forwarding of the
|
||||
configuration cycles on all bridges in
|
||||
this bus segment to avoid possible
|
||||
conflicts in the second pass between two
|
||||
bridges programmed with overlapping
|
||||
bus ranges. */
|
||||
pci_write_config_dword(dev, PCI_PRIMARY_BUS,
|
||||
buses & ~0xffffff);
|
||||
return max;
|
||||
}
|
||||
|
||||
/* Clear errors */
|
||||
pci_write_config_word(dev, PCI_STATUS, 0xffff);
|
||||
|
|
|
@@ -437,7 +437,7 @@ __ccwgroup_get_gdev_by_cdev(struct ccw_device *cdev)
 	if (cdev->dev.driver_data) {
 		gdev = (struct ccwgroup_device *)cdev->dev.driver_data;
 		if (get_device(&gdev->dev)) {
-			if (klist_node_attached(&gdev->dev.knode_bus))
+			if (device_is_registered(&gdev->dev))
 				return gdev;
 			put_device(&gdev->dev);
 		}
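This ccwgroup hunk, like the USB core hunks further down, stops peeking at klist_node_attached(&dev->knode_bus) and asks the driver core directly whether device_add() has completed. A short sketch of the replacement call (the helper name is illustrative):

    #include <linux/device.h>
    #include <linux/errno.h>

    /* Act only once the device is registered with the driver model. */
    static int maybe_use_device(struct device *dev)
    {
            if (!device_is_registered(dev))
                    return -ENODEV;
            return 0;
    }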
@ -442,7 +442,6 @@ static void piix_sata_phy_reset(struct ata_port *ap)
|
|||
* piix_set_piomode - Initialize host controller PATA PIO timings
|
||||
* @ap: Port whose timings we are configuring
|
||||
* @adev: um
|
||||
* @pio: PIO mode, 0 - 4
|
||||
*
|
||||
* Set PIO mode for device, in host controller PCI config space.
|
||||
*
|
||||
|
|
|
@ -4131,6 +4131,53 @@ err_out:
|
|||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* ata_host_set_remove - PCI layer callback for device removal
|
||||
* @host_set: ATA host set that was removed
|
||||
*
|
||||
* Unregister all objects associated with this host set. Free those
|
||||
* objects.
|
||||
*
|
||||
* LOCKING:
|
||||
* Inherited from calling layer (may sleep).
|
||||
*/
|
||||
|
||||
|
||||
void ata_host_set_remove(struct ata_host_set *host_set)
|
||||
{
|
||||
struct ata_port *ap;
|
||||
unsigned int i;
|
||||
|
||||
for (i = 0; i < host_set->n_ports; i++) {
|
||||
ap = host_set->ports[i];
|
||||
scsi_remove_host(ap->host);
|
||||
}
|
||||
|
||||
free_irq(host_set->irq, host_set);
|
||||
|
||||
for (i = 0; i < host_set->n_ports; i++) {
|
||||
ap = host_set->ports[i];
|
||||
|
||||
ata_scsi_release(ap->host);
|
||||
|
||||
if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
|
||||
struct ata_ioports *ioaddr = &ap->ioaddr;
|
||||
|
||||
if (ioaddr->cmd_addr == 0x1f0)
|
||||
release_region(0x1f0, 8);
|
||||
else if (ioaddr->cmd_addr == 0x170)
|
||||
release_region(0x170, 8);
|
||||
}
|
||||
|
||||
scsi_host_put(ap->host);
|
||||
}
|
||||
|
||||
if (host_set->ops->host_stop)
|
||||
host_set->ops->host_stop(host_set);
|
||||
|
||||
kfree(host_set);
|
||||
}
|
||||
|
||||
/**
|
||||
* ata_scsi_release - SCSI layer callback hook for host unload
|
||||
* @host: libata host to be unloaded
|
||||
|
@ -4471,39 +4518,8 @@ void ata_pci_remove_one (struct pci_dev *pdev)
|
|||
{
|
||||
struct device *dev = pci_dev_to_dev(pdev);
|
||||
struct ata_host_set *host_set = dev_get_drvdata(dev);
|
||||
struct ata_port *ap;
|
||||
unsigned int i;
|
||||
|
||||
for (i = 0; i < host_set->n_ports; i++) {
|
||||
ap = host_set->ports[i];
|
||||
|
||||
scsi_remove_host(ap->host);
|
||||
}
|
||||
|
||||
free_irq(host_set->irq, host_set);
|
||||
|
||||
for (i = 0; i < host_set->n_ports; i++) {
|
||||
ap = host_set->ports[i];
|
||||
|
||||
ata_scsi_release(ap->host);
|
||||
|
||||
if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
|
||||
struct ata_ioports *ioaddr = &ap->ioaddr;
|
||||
|
||||
if (ioaddr->cmd_addr == 0x1f0)
|
||||
release_region(0x1f0, 8);
|
||||
else if (ioaddr->cmd_addr == 0x170)
|
||||
release_region(0x170, 8);
|
||||
}
|
||||
|
||||
scsi_host_put(ap->host);
|
||||
}
|
||||
|
||||
if (host_set->ops->host_stop)
|
||||
host_set->ops->host_stop(host_set);
|
||||
|
||||
kfree(host_set);
|
||||
|
||||
ata_host_set_remove(host_set);
|
||||
pci_release_regions(pdev);
|
||||
pci_disable_device(pdev);
|
||||
dev_set_drvdata(dev, NULL);
|
||||
|
@ -4573,6 +4589,7 @@ module_exit(ata_exit);
|
|||
EXPORT_SYMBOL_GPL(ata_std_bios_param);
|
||||
EXPORT_SYMBOL_GPL(ata_std_ports);
|
||||
EXPORT_SYMBOL_GPL(ata_device_add);
|
||||
EXPORT_SYMBOL_GPL(ata_host_set_remove);
|
||||
EXPORT_SYMBOL_GPL(ata_sg_init);
|
||||
EXPORT_SYMBOL_GPL(ata_sg_init_one);
|
||||
EXPORT_SYMBOL_GPL(ata_qc_complete);
|
||||
|
|
|
@ -1959,22 +1959,35 @@ static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match)
|
|||
/* Set it up */
|
||||
mesh_init(ms);
|
||||
|
||||
/* XXX FIXME: error should be fatal */
|
||||
if (request_irq(ms->meshintr, do_mesh_interrupt, 0, "MESH", ms))
|
||||
/* Request interrupt */
|
||||
if (request_irq(ms->meshintr, do_mesh_interrupt, 0, "MESH", ms)) {
|
||||
printk(KERN_ERR "MESH: can't get irq %d\n", ms->meshintr);
|
||||
goto out_shutdown;
|
||||
}
|
||||
|
||||
/* XXX FIXME: handle failure */
|
||||
scsi_add_host(mesh_host, &mdev->ofdev.dev);
|
||||
/* Add scsi host & scan */
|
||||
if (scsi_add_host(mesh_host, &mdev->ofdev.dev))
|
||||
goto out_release_irq;
|
||||
scsi_scan_host(mesh_host);
|
||||
|
||||
return 0;
|
||||
|
||||
out_unmap:
|
||||
out_release_irq:
|
||||
free_irq(ms->meshintr, ms);
|
||||
out_shutdown:
|
||||
/* shutdown & reset bus in case of error or macos can be confused
|
||||
* at reboot if the bus was set to synchronous mode already
|
||||
*/
|
||||
mesh_shutdown(mdev);
|
||||
set_mesh_power(ms, 0);
|
||||
pci_free_consistent(macio_get_pci_dev(mdev), ms->dma_cmd_size,
|
||||
ms->dma_cmd_space, ms->dma_cmd_bus);
|
||||
out_unmap:
|
||||
iounmap(ms->dma);
|
||||
iounmap(ms->mesh);
|
||||
out_free:
|
||||
out_free:
|
||||
scsi_host_put(mesh_host);
|
||||
out_release:
|
||||
out_release:
|
||||
macio_release_resources(mdev);
|
||||
|
||||
return -ENODEV;
|
||||
|
@ -2001,7 +2014,7 @@ static int mesh_remove(struct macio_dev *mdev)
|
|||
|
||||
/* Free DMA commands memory */
|
||||
pci_free_consistent(macio_get_pci_dev(mdev), ms->dma_cmd_size,
|
||||
ms->dma_cmd_space, ms->dma_cmd_bus);
|
||||
ms->dma_cmd_space, ms->dma_cmd_bus);
|
||||
|
||||
/* Release memory resources */
|
||||
macio_release_resources(mdev);
|
||||
|
|
|
@ -158,6 +158,8 @@ static struct pci_device_id nv_pci_tbl[] = {
|
|||
PCI_ANY_ID, PCI_ANY_ID, 0, 0, MCP51 },
|
||||
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA,
|
||||
PCI_ANY_ID, PCI_ANY_ID, 0, 0, MCP55 },
|
||||
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2,
|
||||
PCI_ANY_ID, PCI_ANY_ID, 0, 0, MCP55 },
|
||||
{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
|
||||
PCI_ANY_ID, PCI_ANY_ID,
|
||||
PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC },
|
||||
|
|
|
@@ -98,7 +98,7 @@ static irqreturn_t clps711xuart_int_rx(int irq, void *dev_id, struct pt_regs *re
 {
 	struct uart_port *port = dev_id;
 	struct tty_struct *tty = port->info->tty;
-	unsigned int status, ch, flg, ignored = 0;
+	unsigned int status, ch, flg;
 
 	status = clps_readl(SYSFLG(port));
 	while (!(status & SYSFLG_URXFE)) {
@ -987,7 +987,7 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0)
|
|||
|
||||
/* remove this interface if it has been registered */
|
||||
interface = dev->actconfig->interface[i];
|
||||
if (!klist_node_attached(&interface->dev.knode_bus))
|
||||
if (!device_is_registered(&interface->dev))
|
||||
continue;
|
||||
dev_dbg (&dev->dev, "unregistering interface %s\n",
|
||||
interface->dev.bus_id);
|
||||
|
|
|
@ -303,7 +303,7 @@ int usb_driver_claim_interface(struct usb_driver *driver,
|
|||
/* if interface was already added, bind now; else let
|
||||
* the future device_add() bind it, bypassing probe()
|
||||
*/
|
||||
if (klist_node_attached(&dev->knode_bus))
|
||||
if (device_is_registered(dev))
|
||||
device_bind_driver(dev);
|
||||
|
||||
return 0;
|
||||
|
@ -336,8 +336,8 @@ void usb_driver_release_interface(struct usb_driver *driver,
|
|||
if (iface->condition != USB_INTERFACE_BOUND)
|
||||
return;
|
||||
|
||||
/* release only after device_add() */
|
||||
if (klist_node_attached(&dev->knode_bus)) {
|
||||
/* don't release if the interface hasn't been added yet */
|
||||
if (device_is_registered(dev)) {
|
||||
iface->condition = USB_INTERFACE_UNBINDING;
|
||||
device_release_driver(dev);
|
||||
}
|
||||
|
|
|
@ -422,7 +422,7 @@ static inline void ep0_idle (struct pxa2xx_udc *dev)
|
|||
}
|
||||
|
||||
static int
|
||||
write_packet(volatile u32 *uddr, struct pxa2xx_request *req, unsigned max)
|
||||
write_packet(volatile unsigned long *uddr, struct pxa2xx_request *req, unsigned max)
|
||||
{
|
||||
u8 *buf;
|
||||
unsigned length, count;
|
||||
|
@ -2602,7 +2602,7 @@ static int __exit pxa2xx_udc_remove(struct device *_dev)
|
|||
* VBUS IRQs should probably be ignored so that the PXA device just acts
|
||||
* "dead" to USB hosts until system resume.
|
||||
*/
|
||||
static int pxa2xx_udc_suspend(struct device *dev, u32 state, u32 level)
|
||||
static int pxa2xx_udc_suspend(struct device *dev, pm_message_t state, u32 level)
|
||||
{
|
||||
struct pxa2xx_udc *udc = dev_get_drvdata(dev);
|
||||
|
||||
|
|
|
@ -69,11 +69,11 @@ struct pxa2xx_ep {
|
|||
* UDDR = UDC Endpoint Data Register (the fifo)
|
||||
* DRCM = DMA Request Channel Map
|
||||
*/
|
||||
volatile u32 *reg_udccs;
|
||||
volatile u32 *reg_ubcr;
|
||||
volatile u32 *reg_uddr;
|
||||
volatile unsigned long *reg_udccs;
|
||||
volatile unsigned long *reg_ubcr;
|
||||
volatile unsigned long *reg_uddr;
|
||||
#ifdef USE_DMA
|
||||
volatile u32 *reg_drcmr;
|
||||
volatile unsigned long *reg_drcmr;
|
||||
#define drcmr(n) .reg_drcmr = & DRCMR ## n ,
|
||||
#else
|
||||
#define drcmr(n)
|
||||
|
|
|
@ -782,6 +782,9 @@ retry:
|
|||
/* usb 1.1 says max 90% of a frame is available for periodic transfers.
|
||||
* this driver doesn't promise that much since it's got to handle an
|
||||
* IRQ per packet; irq handling latencies also use up that time.
|
||||
*
|
||||
* NOTE: the periodic schedule is a sparse tree, with the load for
|
||||
* each branch minimized. see fig 3.5 in the OHCI spec for example.
|
||||
*/
|
||||
#define MAX_PERIODIC_LOAD 500 /* out of 1000 usec */
|
||||
|
||||
|
@ -843,6 +846,7 @@ static int sl811h_urb_enqueue(
|
|||
if (!(sl811->port1 & (1 << USB_PORT_FEAT_ENABLE))
|
||||
|| !HC_IS_RUNNING(hcd->state)) {
|
||||
retval = -ENODEV;
|
||||
kfree(ep);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
|
@ -911,8 +915,16 @@ static int sl811h_urb_enqueue(
|
|||
case PIPE_ISOCHRONOUS:
|
||||
case PIPE_INTERRUPT:
|
||||
urb->interval = ep->period;
|
||||
if (ep->branch < PERIODIC_SIZE)
|
||||
if (ep->branch < PERIODIC_SIZE) {
|
||||
/* NOTE: the phase is correct here, but the value
|
||||
* needs offsetting by the transfer queue depth.
|
||||
* All current drivers ignore start_frame, so this
|
||||
* is unlikely to ever matter...
|
||||
*/
|
||||
urb->start_frame = (sl811->frame & (PERIODIC_SIZE - 1))
|
||||
+ ep->branch;
|
||||
break;
|
||||
}
|
||||
|
||||
retval = balance(sl811, ep->period, ep->load);
|
||||
if (retval < 0)
|
||||
|
@ -1122,7 +1134,7 @@ sl811h_hub_descriptor (
|
|||
desc->wHubCharacteristics = (__force __u16)cpu_to_le16(temp);
|
||||
|
||||
/* two bitmaps: ports removable, and legacy PortPwrCtrlMask */
|
||||
desc->bitmap[0] = 1 << 1;
|
||||
desc->bitmap[0] = 0 << 1;
|
||||
desc->bitmap[1] = ~0;
|
||||
}
|
||||
|
||||
|
|
|
@ -647,6 +647,13 @@ static void read_bulk_callback(struct urb *urb, struct pt_regs *regs)
|
|||
pkt_len -= 8;
|
||||
}
|
||||
|
||||
/*
|
||||
* If the packet is unreasonably long, quietly drop it rather than
|
||||
* kernel panicking by calling skb_put.
|
||||
*/
|
||||
if (pkt_len > PEGASUS_MTU)
|
||||
goto goon;
|
||||
|
||||
/*
|
||||
* at this point we are sure pegasus->rx_skb != NULL
|
||||
* so we go ahead and pass up the packet.
|
||||
|
@ -886,15 +893,17 @@ static inline void get_interrupt_interval(pegasus_t * pegasus)
|
|||
__u8 data[2];
|
||||
|
||||
read_eprom_word(pegasus, 4, (__u16 *) data);
|
||||
if (data[1] < 0x80) {
|
||||
if (netif_msg_timer(pegasus))
|
||||
dev_info(&pegasus->intf->dev,
|
||||
"intr interval changed from %ums to %ums\n",
|
||||
data[1], 0x80);
|
||||
data[1] = 0x80;
|
||||
#ifdef PEGASUS_WRITE_EEPROM
|
||||
write_eprom_word(pegasus, 4, *(__u16 *) data);
|
||||
if (pegasus->usb->speed != USB_SPEED_HIGH) {
|
||||
if (data[1] < 0x80) {
|
||||
if (netif_msg_timer(pegasus))
|
||||
dev_info(&pegasus->intf->dev, "intr interval "
|
||||
"changed from %ums to %ums\n",
|
||||
data[1], 0x80);
|
||||
data[1] = 0x80;
|
||||
#ifdef PEGASUS_WRITE_EEPROM
|
||||
write_eprom_word(pegasus, 4, *(__u16 *) data);
|
||||
#endif
|
||||
}
|
||||
}
|
||||
pegasus->intr_interval = data[1];
|
||||
}
|
||||
|
@ -904,8 +913,9 @@ static void set_carrier(struct net_device *net)
|
|||
pegasus_t *pegasus = netdev_priv(net);
|
||||
u16 tmp;
|
||||
|
||||
if (read_mii_word(pegasus, pegasus->phy, MII_BMSR, &tmp))
|
||||
if (!read_mii_word(pegasus, pegasus->phy, MII_BMSR, &tmp))
|
||||
return;
|
||||
|
||||
if (tmp & BMSR_LSTATUS)
|
||||
netif_carrier_on(net);
|
||||
else
|
||||
|
@ -1355,6 +1365,7 @@ static void pegasus_disconnect(struct usb_interface *intf)
|
|||
cancel_delayed_work(&pegasus->carrier_check);
|
||||
unregister_netdev(pegasus->net);
|
||||
usb_put_dev(interface_to_usbdev(intf));
|
||||
unlink_all_urbs(pegasus);
|
||||
free_all_urbs(pegasus);
|
||||
free_skb_pool(pegasus);
|
||||
if (pegasus->rx_skb)
|
||||
|
|
|
@@ -16,7 +16,8 @@
 #include "usb-serial.h"
 
 static struct usb_device_id id_table [] = {
-	{ USB_DEVICE(0xf3d, 0x0112) },
+	{ USB_DEVICE(0xf3d, 0x0112) }, /* AirPrime CDMA Wireless PC Card */
+	{ USB_DEVICE(0x1410, 0x1110) }, /* Novatel Wireless Merlin CDMA */
 	{ },
 };
 MODULE_DEVICE_TABLE(usb, id_table);
@ -1846,10 +1846,12 @@ static void ftdi_set_termios (struct usb_serial_port *port, struct termios *old_
|
|||
} else {
|
||||
/* set the baudrate determined before */
|
||||
if (change_speed(port)) {
|
||||
err("%s urb failed to set baurdrate", __FUNCTION__);
|
||||
err("%s urb failed to set baudrate", __FUNCTION__);
|
||||
}
|
||||
/* Ensure RTS and DTR are raised when baudrate changed from 0 */
|
||||
if ((old_termios->c_cflag & CBAUD) == B0) {
|
||||
set_mctrl(port, TIOCM_DTR | TIOCM_RTS);
|
||||
}
|
||||
/* Ensure RTS and DTR are raised */
|
||||
set_mctrl(port, TIOCM_DTR | TIOCM_RTS);
|
||||
}
|
||||
|
||||
/* Set flow control */
|
||||
|
|
|
@ -25,6 +25,9 @@
|
|||
2005-06-20 v0.4.1 add missing braces :-/
|
||||
killed end-of-line whitespace
|
||||
2005-07-15 v0.4.2 rename WLAN product to FUSION, add FUSION2
|
||||
2005-09-10 v0.4.3 added HUAWEI E600 card and Audiovox AirCard
|
||||
2005-09-20 v0.4.4 increased recv buffer size: the card sometimes
|
||||
wants to send >2000 bytes.
|
||||
|
||||
Work sponsored by: Sigos GmbH, Germany <info@sigos.de>
|
||||
|
||||
|
@ -71,15 +74,21 @@ static int option_send_setup(struct usb_serial_port *port);
|
|||
|
||||
/* Vendor and product IDs */
|
||||
#define OPTION_VENDOR_ID 0x0AF0
|
||||
#define HUAWEI_VENDOR_ID 0x12D1
|
||||
#define AUDIOVOX_VENDOR_ID 0x0F3D
|
||||
|
||||
#define OPTION_PRODUCT_OLD 0x5000
|
||||
#define OPTION_PRODUCT_FUSION 0x6000
|
||||
#define OPTION_PRODUCT_FUSION2 0x6300
|
||||
#define HUAWEI_PRODUCT_E600 0x1001
|
||||
#define AUDIOVOX_PRODUCT_AIRCARD 0x0112
|
||||
|
||||
static struct usb_device_id option_ids[] = {
|
||||
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_OLD) },
|
||||
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_FUSION) },
|
||||
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_FUSION2) },
|
||||
{ USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E600) },
|
||||
{ USB_DEVICE(AUDIOVOX_VENDOR_ID, AUDIOVOX_PRODUCT_AIRCARD) },
|
||||
{ } /* Terminating entry */
|
||||
};
|
||||
|
||||
|
@@ -132,7 +141,7 @@ static int debug;

#define N_IN_URB	4
#define N_OUT_URB	1
#define IN_BUFLEN	1024
#define IN_BUFLEN	4096
#define OUT_BUFLEN	128

struct option_port_private {

@@ -253,9 +253,11 @@ static int atyfb_xl_init(struct fb_info *info)
	aty_st_le32(0xFC, 0x00000000, par);

#if defined (CONFIG_FB_ATY_GENERIC_LCD)
	int i;
	for (i=0; i<sizeof(lcd_tbl)/sizeof(lcd_tbl_t); i++) {
		aty_st_lcd(lcd_tbl[i].lcd_reg, lcd_tbl[i].val, par);
	{
		int i;

		for (i = 0; i < ARRAY_SIZE(lcd_tbl); i++)
			aty_st_lcd(lcd_tbl[i].lcd_reg, lcd_tbl[i].val, par);
	}
#endif

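Note (illustration only, not part of the commit): the atyfb hunk above replaces the open-coded sizeof(lcd_tbl)/sizeof(lcd_tbl_t) element count with ARRAY_SIZE(). A minimal stand-alone sketch of the same idiom, using hypothetical table and field names; the kernel's own macro additionally rejects non-array arguments at build time.

#include <stdio.h>

/* Same idea as the kernel's ARRAY_SIZE(): derive the element count
 * from the array itself rather than from a separately named type. */
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

struct lcd_entry {              /* hypothetical stand-in for lcd_tbl_t */
	int reg;
	int val;
};

static const struct lcd_entry lcd_tbl[] = {
	{ 0x01, 0x10 },
	{ 0x02, 0x20 },
	{ 0x03, 0x30 },
};

int main(void)
{
	size_t i;

	/* The loop bound stays correct even if the element type of
	 * lcd_tbl is later renamed or changed. */
	for (i = 0; i < ARRAY_SIZE(lcd_tbl); i++)
		printf("reg %#x -> val %#x\n", lcd_tbl[i].reg, lcd_tbl[i].val);

	return 0;
}
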
fs/9p/conv.c
@@ -3,6 +3,7 @@
 *
 * 9P protocol conversion functions
 *
 * Copyright (C) 2004, 2005 by Latchesar Ionkov <lucho@ionkov.net>
 * Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
 * Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
 *
@@ -55,66 +56,70 @@ static inline int buf_check_overflow(struct cbuf *buf)
	return buf->p > buf->ep;
}

static inline void buf_check_size(struct cbuf *buf, int len)
static inline int buf_check_size(struct cbuf *buf, int len)
{
	if (buf->p+len > buf->ep) {
		if (buf->p < buf->ep) {
			eprintk(KERN_ERR, "buffer overflow\n");
			buf->p = buf->ep + 1;
			return 0;
		}
	}

	return 1;
}

static inline void *buf_alloc(struct cbuf *buf, int len)
{
	void *ret = NULL;

	buf_check_size(buf, len);
	ret = buf->p;
	buf->p += len;
	if (buf_check_size(buf, len)) {
		ret = buf->p;
		buf->p += len;
	}

	return ret;
}

static inline void buf_put_int8(struct cbuf *buf, u8 val)
{
	buf_check_size(buf, 1);

	buf->p[0] = val;
	buf->p++;
	if (buf_check_size(buf, 1)) {
		buf->p[0] = val;
		buf->p++;
	}
}

static inline void buf_put_int16(struct cbuf *buf, u16 val)
{
	buf_check_size(buf, 2);

	*(__le16 *) buf->p = cpu_to_le16(val);
	buf->p += 2;
	if (buf_check_size(buf, 2)) {
		*(__le16 *) buf->p = cpu_to_le16(val);
		buf->p += 2;
	}
}

static inline void buf_put_int32(struct cbuf *buf, u32 val)
{
	buf_check_size(buf, 4);

	*(__le32 *)buf->p = cpu_to_le32(val);
	buf->p += 4;
	if (buf_check_size(buf, 4)) {
		*(__le32 *)buf->p = cpu_to_le32(val);
		buf->p += 4;
	}
}

static inline void buf_put_int64(struct cbuf *buf, u64 val)
{
	buf_check_size(buf, 8);

	*(__le64 *)buf->p = cpu_to_le64(val);
	buf->p += 8;
	if (buf_check_size(buf, 8)) {
		*(__le64 *)buf->p = cpu_to_le64(val);
		buf->p += 8;
	}
}

static inline void buf_put_stringn(struct cbuf *buf, const char *s, u16 slen)
{
	buf_check_size(buf, slen + 2);

	buf_put_int16(buf, slen);
	memcpy(buf->p, s, slen);
	buf->p += slen;
	if (buf_check_size(buf, slen + 2)) {
		buf_put_int16(buf, slen);
		memcpy(buf->p, s, slen);
		buf->p += slen;
	}
}

static inline void buf_put_string(struct cbuf *buf, const char *s)
@@ -124,20 +129,20 @@ static inline void buf_put_string(struct cbuf *buf, const char *s)

static inline void buf_put_data(struct cbuf *buf, void *data, u32 datalen)
{
	buf_check_size(buf, datalen);

	memcpy(buf->p, data, datalen);
	buf->p += datalen;
	if (buf_check_size(buf, datalen)) {
		memcpy(buf->p, data, datalen);
		buf->p += datalen;
	}
}

static inline u8 buf_get_int8(struct cbuf *buf)
{
	u8 ret = 0;

	buf_check_size(buf, 1);
	ret = buf->p[0];

	buf->p++;
	if (buf_check_size(buf, 1)) {
		ret = buf->p[0];
		buf->p++;
	}

	return ret;
}
@@ -146,10 +151,10 @@ static inline u16 buf_get_int16(struct cbuf *buf)
{
	u16 ret = 0;

	buf_check_size(buf, 2);
	ret = le16_to_cpu(*(__le16 *)buf->p);

	buf->p += 2;
	if (buf_check_size(buf, 2)) {
		ret = le16_to_cpu(*(__le16 *)buf->p);
		buf->p += 2;
	}

	return ret;
}
@@ -158,10 +163,10 @@ static inline u32 buf_get_int32(struct cbuf *buf)
{
	u32 ret = 0;

	buf_check_size(buf, 4);
	ret = le32_to_cpu(*(__le32 *)buf->p);

	buf->p += 4;
	if (buf_check_size(buf, 4)) {
		ret = le32_to_cpu(*(__le32 *)buf->p);
		buf->p += 4;
	}

	return ret;
}
@@ -170,10 +175,10 @@ static inline u64 buf_get_int64(struct cbuf *buf)
{
	u64 ret = 0;

	buf_check_size(buf, 8);
	ret = le64_to_cpu(*(__le64 *)buf->p);

	buf->p += 8;
	if (buf_check_size(buf, 8)) {
		ret = le64_to_cpu(*(__le64 *)buf->p);
		buf->p += 8;
	}

	return ret;
}
@@ -181,27 +186,35 @@ static inline u64 buf_get_int64(struct cbuf *buf)
static inline int
buf_get_string(struct cbuf *buf, char *data, unsigned int datalen)
{
	u16 len = 0;

	u16 len = buf_get_int16(buf);
	buf_check_size(buf, len);
	if (len + 1 > datalen)
		return 0;
	len = buf_get_int16(buf);
	if (!buf_check_overflow(buf) && buf_check_size(buf, len) && len+1>datalen) {
		memcpy(data, buf->p, len);
		data[len] = 0;
		buf->p += len;
		len++;
	}

	memcpy(data, buf->p, len);
	data[len] = 0;
	buf->p += len;

	return len + 1;
	return len;
}

static inline char *buf_get_stringb(struct cbuf *buf, struct cbuf *sbuf)
{
	char *ret = NULL;
	int n = buf_get_string(buf, sbuf->p, sbuf->ep - sbuf->p);
	char *ret;
	u16 len;

	if (n > 0) {
	ret = NULL;
	len = buf_get_int16(buf);

	if (!buf_check_overflow(buf) && buf_check_size(buf, len) &&
	    buf_check_size(sbuf, len+1)) {

		memcpy(sbuf->p, buf->p, len);
		sbuf->p[len] = 0;
		ret = sbuf->p;
		sbuf->p += n;
		buf->p += len;
		sbuf->p += len + 1;
	}

	return ret;
@@ -209,12 +222,15 @@ static inline char *buf_get_stringb(struct cbuf *buf, struct cbuf *sbuf)

static inline int buf_get_data(struct cbuf *buf, void *data, int datalen)
{
	buf_check_size(buf, datalen);
	int ret = 0;

	memcpy(data, buf->p, datalen);
	buf->p += datalen;
	if (buf_check_size(buf, datalen)) {
		memcpy(data, buf->p, datalen);
		buf->p += datalen;
		ret = datalen;
	}

	return datalen;
	return ret;
}

static inline void *buf_get_datab(struct cbuf *buf, struct cbuf *dbuf,
@@ -223,13 +239,12 @@ static inline void *buf_get_datab(struct cbuf *buf, struct cbuf *dbuf,
	char *ret = NULL;
	int n = 0;

	buf_check_size(dbuf, datalen);

	n = buf_get_data(buf, dbuf->p, datalen);

	if (n > 0) {
		ret = dbuf->p;
		dbuf->p += n;
	if (buf_check_size(dbuf, datalen)) {
		n = buf_get_data(buf, dbuf->p, datalen);
		if (n > 0) {
			ret = dbuf->p;
			dbuf->p += n;
		}
	}

	return ret;
@@ -636,7 +651,7 @@ v9fs_deserialize_fcall(struct v9fs_session_info *v9ses, u32 msgsize,
		break;
	case RWALK:
		rcall->params.rwalk.nwqid = buf_get_int16(bufp);
		rcall->params.rwalk.wqids = buf_alloc(bufp,
		rcall->params.rwalk.wqids = buf_alloc(dbufp,
			rcall->params.rwalk.nwqid * sizeof(struct v9fs_qid));
		if (rcall->params.rwalk.wqids)
			for (i = 0; i < rcall->params.rwalk.nwqid; i++) {

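Note (illustration only, not part of the commit): the fs/9p/conv.c hunks above turn buf_check_size() from a void helper into one that reports success or failure, and every buf_put_*/buf_get_* helper now copies data only when the check passes, so an overflowing message is truncated instead of being written past the buffer. A self-contained sketch of that pattern, with simplified stand-in types rather than the kernel's struct cbuf:

#include <stdio.h>
#include <stdint.h>

/* simplified stand-in for the kernel's struct cbuf */
struct cbuf {
	unsigned char *p;	/* current position */
	unsigned char *ep;	/* end of buffer */
};

/* Returns 1 when len more bytes fit, 0 otherwise; when a request does
 * not fit but there is still room left, it reports the overflow and
 * poisons the position so that later checks also fail. */
static int buf_check_size(struct cbuf *buf, int len)
{
	if (buf->p + len > buf->ep) {
		if (buf->p < buf->ep) {
			fprintf(stderr, "buffer overflow\n");
			buf->p = buf->ep + 1;
		}
		return 0;
	}
	return 1;
}

/* Writers only touch the buffer when the check succeeds. */
static void buf_put_int8(struct cbuf *buf, uint8_t val)
{
	if (buf_check_size(buf, 1)) {
		buf->p[0] = val;
		buf->p++;
	}
}

int main(void)
{
	unsigned char raw[2];
	struct cbuf buf = { raw, raw + sizeof(raw) };

	buf_put_int8(&buf, 0x11);
	buf_put_int8(&buf, 0x22);
	buf_put_int8(&buf, 0x33);	/* rejected: would overflow */

	printf("bytes consumed: %ld\n", (long)(buf.p - raw));
	return 0;
}
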
@@ -303,7 +303,13 @@ v9fs_session_init(struct v9fs_session_info *v9ses,
		goto SessCleanUp;
	};

	v9ses->transport = trans_proto;
	v9ses->transport = kmalloc(sizeof(*v9ses->transport), GFP_KERNEL);
	if (!v9ses->transport) {
		retval = -ENOMEM;
		goto SessCleanUp;
	}

	memmove(v9ses->transport, trans_proto, sizeof(*v9ses->transport));

	if ((retval = v9ses->transport->init(v9ses, dev_name, data)) < 0) {
		eprintk(KERN_ERR, "problem initializing transport\n");

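Note (illustration only, not part of the commit): the v9fs_session_init() hunk above stops pointing the session at the shared trans_proto template and instead gives each session its own allocated copy, so per-session transport state no longer aliases the global template. A small userspace sketch of the same copy-the-template pattern, with made-up type and field names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* hypothetical stand-in for a transport descriptor */
struct transport {
	const char *name;
	int fd;			/* per-session state */
};

/* shared, read-only template (analogous to trans_proto) */
static const struct transport tcp_template = { "tcp", -1 };

static struct transport *session_transport_create(const struct transport *proto)
{
	/* each session gets its own writable copy of the template */
	struct transport *t = malloc(sizeof(*t));
	if (!t)
		return NULL;
	memcpy(t, proto, sizeof(*t));
	return t;
}

int main(void)
{
	struct transport *a = session_transport_create(&tcp_template);
	struct transport *b = session_transport_create(&tcp_template);

	if (!a || !b)
		return 1;

	a->fd = 3;	/* mutating one session leaves the others untouched */
	printf("a.fd=%d b.fd=%d template.fd=%d\n", a->fd, b->fd, tcp_template.fd);

	free(a);
	free(b);
	return 0;
}
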
@@ -1063,8 +1063,8 @@ static int v9fs_vfs_readlink(struct dentry *dentry, char __user * buffer,
	int ret;
	char *link = __getname();

	if (strlen(link) < buflen)
		buflen = strlen(link);
	if (buflen > PATH_MAX)
		buflen = PATH_MAX;

	dprintk(DEBUG_VFS, " dentry: %s (%p)\n", dentry->d_iname, dentry);

@@ -129,8 +129,8 @@ static struct super_block *v9fs_get_sb(struct file_system_type

	if ((newfid = v9fs_session_init(v9ses, dev_name, data)) < 0) {
		dprintk(DEBUG_ERROR, "problem initiating session\n");
		retval = newfid;
		goto free_session;
		kfree(v9ses);
		return ERR_PTR(newfid);
	}

	sb = sget(fs_type, NULL, v9fs_set_super, v9ses);
@@ -150,7 +150,7 @@ static struct super_block *v9fs_get_sb(struct file_system_type

	if (!root) {
		retval = -ENOMEM;
		goto release_inode;
		goto put_back_sb;
	}

	sb->s_root = root;
@@ -159,7 +159,7 @@ static struct super_block *v9fs_get_sb(struct file_system_type
	root_fid = v9fs_fid_create(root);
	if (root_fid == NULL) {
		retval = -ENOMEM;
		goto release_dentry;
		goto put_back_sb;
	}

	root_fid->fidopen = 0;
@@ -182,25 +182,15 @@ static struct super_block *v9fs_get_sb(struct file_system_type

	if (stat_result < 0) {
		retval = stat_result;
		goto release_dentry;
		goto put_back_sb;
	}

	return sb;

release_dentry:
	dput(sb->s_root);

release_inode:
	iput(inode);

put_back_sb:
put_back_sb:
	/* deactivate_super calls v9fs_kill_super which will frees the rest */
	up_write(&sb->s_umount);
	deactivate_super(sb);
	v9fs_session_close(v9ses);

free_session:
	kfree(v9ses);

	return ERR_PTR(retval);
}

@@ -781,6 +781,8 @@ static int cifs_oplock_thread(void * dummyarg)

	oplockThread = current;
	do {
		if (try_to_freeze())
			continue;
		set_current_state(TASK_INTERRUPTIBLE);

		schedule_timeout(1*HZ);

@@ -344,6 +344,8 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server)
	}

	while (server->tcpStatus != CifsExiting) {
		if (try_to_freeze())
			continue;
		if (bigbuf == NULL) {
			bigbuf = cifs_buf_get();
			if(bigbuf == NULL) {

@@ -1410,7 +1410,7 @@ unsigned long ext3_count_free_blocks(struct super_block *sb)
	unsigned long desc_count;
	struct ext3_group_desc *gdp;
	int i;
	unsigned long ngroups;
	unsigned long ngroups = EXT3_SB(sb)->s_groups_count;
#ifdef EXT3FS_DEBUG
	struct ext3_super_block *es;
	unsigned long bitmap_count, x;
@@ -1421,7 +1421,8 @@ unsigned long ext3_count_free_blocks(struct super_block *sb)
	desc_count = 0;
	bitmap_count = 0;
	gdp = NULL;
	for (i = 0; i < EXT3_SB(sb)->s_groups_count; i++) {

	for (i = 0; i < ngroups; i++) {
		gdp = ext3_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
@@ -1443,7 +1444,6 @@ unsigned long ext3_count_free_blocks(struct super_block *sb)
	return bitmap_count;
#else
	desc_count = 0;
	ngroups = EXT3_SB(sb)->s_groups_count;
	smp_rmb();
	for (i = 0; i < ngroups; i++) {
		gdp = ext3_get_group_desc(sb, i, NULL);

@@ -242,7 +242,7 @@ static int setup_new_group_blocks(struct super_block *sb,
	     i < sbi->s_itb_per_group; i++, bit++, block++) {
		struct buffer_head *it;

		ext3_debug("clear inode block %#04x (+%ld)\n", block, bit);
		ext3_debug("clear inode block %#04lx (+%d)\n", block, bit);
		if (IS_ERR(it = bclean(handle, sb, block))) {
			err = PTR_ERR(it);
			goto exit_bh;
@@ -643,8 +643,8 @@ static void update_backups(struct super_block *sb,
			break;

		bh = sb_getblk(sb, group * bpg + blk_off);
		ext3_debug(sb, __FUNCTION__, "update metadata backup %#04lx\n",
			   bh->b_blocknr);
		ext3_debug("update metadata backup %#04lx\n",
			   (unsigned long)bh->b_blocknr);
		if ((err = ext3_journal_get_write_access(handle, bh)))
			break;
		lock_buffer(bh);

@@ -512,15 +512,14 @@ static void ext3_clear_inode(struct inode *inode)

static int ext3_show_options(struct seq_file *seq, struct vfsmount *vfs)
{
	struct ext3_sb_info *sbi = EXT3_SB(vfs->mnt_sb);
	struct super_block *sb = vfs->mnt_sb;
	struct ext3_sb_info *sbi = EXT3_SB(sb);

	if (sbi->s_mount_opt & EXT3_MOUNT_JOURNAL_DATA)
	if (test_opt(sb, DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA)
		seq_puts(seq, ",data=journal");

	if (sbi->s_mount_opt & EXT3_MOUNT_ORDERED_DATA)
	else if (test_opt(sb, DATA_FLAGS) == EXT3_MOUNT_ORDERED_DATA)
		seq_puts(seq, ",data=ordered");

	if (sbi->s_mount_opt & EXT3_MOUNT_WRITEBACK_DATA)
	else if (test_opt(sb, DATA_FLAGS) == EXT3_MOUNT_WRITEBACK_DATA)
		seq_puts(seq, ",data=writeback");

#if defined(CONFIG_QUOTA)

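Note (illustration only, not part of the commit): a plausible reading of the ext3_show_options() hunk above is that the three data= journalling modes are encoded as values inside one masked field of the mount options rather than as independent bits, so testing each with a plain bitwise AND can match more than one mode, while comparing the masked field against each value (chained with else if) reports exactly one. A minimal sketch of that mask-versus-value idea with made-up constants, not the real ext3 definitions:

#include <stdio.h>

/* made-up constants mirroring the idea: writeback sets both bits */
#define MOUNT_JOURNAL_DATA	0x0400
#define MOUNT_ORDERED_DATA	0x0800
#define MOUNT_WRITEBACK_DATA	0x0C00
#define MOUNT_DATA_FLAGS	0x0C00	/* mask covering all three modes */

static const char *data_mode(unsigned int mount_opt)
{
	/* A plain (mount_opt & MOUNT_ORDERED_DATA) test would also be
	 * true for the writeback value; comparing the masked field
	 * against each value picks exactly one mode. */
	switch (mount_opt & MOUNT_DATA_FLAGS) {
	case MOUNT_JOURNAL_DATA:
		return "data=journal";
	case MOUNT_ORDERED_DATA:
		return "data=ordered";
	case MOUNT_WRITEBACK_DATA:
		return "data=writeback";
	default:
		return "data=?";
	}
}

int main(void)
{
	printf("%s\n", data_mode(MOUNT_WRITEBACK_DATA));
	printf("%s\n", data_mode(MOUNT_ORDERED_DATA));
	return 0;
}
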
@@ -129,8 +129,7 @@ void jfs_delete_inode(struct inode *inode)
	jfs_info("In jfs_delete_inode, inode = 0x%p", inode);

	if (!is_bad_inode(inode) &&
	    (JFS_IP(inode)->fileset == cpu_to_le32(FILESYSTEM_I))) {

	    (JFS_IP(inode)->fileset == FILESYSTEM_I)) {
		truncate_inode_pages(&inode->i_data, 0);

		if (test_cflag(COMMIT_Freewmap, inode))

@@ -3055,7 +3055,7 @@ static int cntlz(u32 value)
 * RETURN VALUES:
 *	log2 number of blocks
 */
int blkstol2(s64 nb)
static int blkstol2(s64 nb)
{
	int l2nb;
	s64 mask;		/* meant to be signed */

@@ -725,6 +725,9 @@ struct tlock *txLock(tid_t tid, struct inode *ip, struct metapage * mp,
	else
		tlck->flag = tlckINODELOCK;

	if (S_ISDIR(ip->i_mode))
		tlck->flag |= tlckDIRECTORY;

	tlck->type = 0;

	/* bind the tlock and the page */
@@ -1009,6 +1012,8 @@ struct tlock *txMaplock(tid_t tid, struct inode *ip, int type)

	/* bind the tlock and the object */
	tlck->flag = tlckINODELOCK;
	if (S_ISDIR(ip->i_mode))
		tlck->flag |= tlckDIRECTORY;
	tlck->ip = ip;
	tlck->mp = NULL;

@@ -1077,6 +1082,8 @@ struct linelock *txLinelock(struct linelock * tlock)
	linelock->flag = tlckLINELOCK;
	linelock->maxcnt = TLOCKLONG;
	linelock->index = 0;
	if (tlck->flag & tlckDIRECTORY)
		linelock->flag |= tlckDIRECTORY;

	/* append linelock after tlock */
	linelock->next = tlock->next;
@@ -2070,8 +2077,8 @@ static void xtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
 *
 * function:	log from maplock of freed data extents;
 */
void mapLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
	    struct tlock * tlck)
static void mapLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
		   struct tlock * tlck)
{
	struct pxd_lock *pxdlock;
	int i, nlock;
@@ -2209,7 +2216,7 @@ void txEA(tid_t tid, struct inode *ip, dxd_t * oldea, dxd_t * newea)
 * function: synchronously write pages locked by transaction
 *	     after txLog() but before txUpdateMap();
 */
void txForce(struct tblock * tblk)
static void txForce(struct tblock * tblk)
{
	struct tlock *tlck;
	lid_t lid, next;
@@ -2358,7 +2365,7 @@ static void txUpdateMap(struct tblock * tblk)
	 */
	else {	/* (maplock->flag & mlckFREE) */

		if (S_ISDIR(tlck->ip->i_mode))
		if (tlck->flag & tlckDIRECTORY)
			txFreeMap(ipimap, maplock,
				  tblk, COMMIT_PWMAP);
		else

@@ -122,6 +122,7 @@ extern struct tlock *TxLock; /* transaction lock table */
#define tlckLOG		0x0800
/* updateMap state */
#define tlckUPDATEMAP	0x0080
#define tlckDIRECTORY	0x0040
/* freeLock state */
#define tlckFREELOCK	0x0008
#define tlckWRITEPAGE	0x0004

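Note (illustration only, not part of the commit): the jfs hunks above record whether a transaction lock belongs to a directory at the moment the tlock is taken (the new tlckDIRECTORY bit, propagated into linelocks), and txUpdateMap() then tests that flag instead of re-deriving it from the inode mode via S_ISDIR(). A tiny sketch of the record-at-lock-time idea, using simplified stand-in types and flag values:

#include <stdio.h>

/* simplified flag bits, modelled loosely on the tlck* defines above */
#define LCK_INODELOCK	0x0001
#define LCK_DIRECTORY	0x0040

struct simple_lock {
	unsigned int flag;
};

/* Capture "is a directory" once, when the lock is created... */
static void lock_init(struct simple_lock *lck, int is_dir)
{
	lck->flag = LCK_INODELOCK;
	if (is_dir)
		lck->flag |= LCK_DIRECTORY;
}

/* ...so later stages only consult the lock, not the inode. */
static const char *lock_kind(const struct simple_lock *lck)
{
	return (lck->flag & LCK_DIRECTORY) ? "directory" : "file";
}

int main(void)
{
	struct simple_lock dir_lck, file_lck;

	lock_init(&dir_lck, 1);
	lock_init(&file_lck, 0);

	printf("%s, %s\n", lock_kind(&dir_lck), lock_kind(&file_lck));
	return 0;
}
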
@@ -184,14 +184,13 @@ static void nfs_readpage_release(struct nfs_page *req)
{
	unlock_page(req->wb_page);

	nfs_clear_request(req);
	nfs_release_request(req);

	dprintk("NFS: read done (%s/%Ld %d@%Ld)\n",
			req->wb_context->dentry->d_inode->i_sb->s_id,
			(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
			req->wb_bytes,
			(long long)req_offset(req));
	nfs_clear_request(req);
	nfs_release_request(req);
}

/*

Some files were not shown because too many files have changed in this diff.