Merge remote branch 'korg/drm-fixes' into drm-vmware-next

Necessary for some of the vmware fixes to be pushed in.

Conflicts:
	drivers/gpu/drm/drm_gem.c
	drivers/gpu/drm/i915/intel_fb.c
	include/drm/drmP.h

commit fb7ba2114b
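The bulk of the vmwgfx changes below hang off one new mechanism: lazy, reference-counted bring-up of the device's SVGA/FIFO state via vmw_3d_resource_inc()/vmw_3d_resource_dec(). As a minimal standalone sketch of that counting pattern — pthreads standing in for the kernel mutex, and struct vmw_private reduced to the two fields the pattern needs (everything here is a simplified assumption, not the driver's real types):

#include <pthread.h>

/* Stand-in for struct vmw_private; only the fields the pattern uses.
 * Initialize release_mutex with PTHREAD_MUTEX_INITIALIZER. */
struct vmw_private {
	pthread_mutex_t release_mutex;
	unsigned int num_3d_resources;
};

static int vmw_request_device(struct vmw_private *p)  { (void)p; return 0; } /* bring up FIFO  */
static void vmw_release_device(struct vmw_private *p) { (void)p; }           /* tear down FIFO */

/* First user powers the 3D core up; a failed bring-up undoes the count. */
int vmw_3d_resource_inc(struct vmw_private *p)
{
	int ret = 0;

	pthread_mutex_lock(&p->release_mutex);
	if (p->num_3d_resources++ == 0) {
		ret = vmw_request_device(p);
		if (ret != 0)
			--p->num_3d_resources;
	}
	pthread_mutex_unlock(&p->release_mutex);
	return ret;
}

/* Last user powers it down again. */
void vmw_3d_resource_dec(struct vmw_private *p)
{
	pthread_mutex_lock(&p->release_mutex);
	if (--p->num_3d_resources == 0)
		vmw_release_device(p);
	pthread_mutex_unlock(&p->release_mutex);
}

Because the counter only changes under release_mutex, the two transitions with side effects (0 to 1 and 1 to 0) cannot race; that is what lets the driver-load path below defer FIFO bring-up until fbdev or a master actually needs 3D.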
@@ -2677,6 +2677,8 @@ M:	Guenter Roeck <guenter.roeck@ericsson.com>
 L:	lm-sensors@lm-sensors.org
 W:	http://www.lm-sensors.org/
 T:	quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-hwmon/
+T:	quilt kernel.org/pub/linux/kernel/people/groeck/linux-staging/
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git
 S:	Maintained
 F:	Documentation/hwmon/
 F:	drivers/hwmon/

@@ -73,8 +73,6 @@
	ldq	$20, HAE_REG($19);	\
	stq	$21, HAE_CACHE($19);	\
	stq	$21, 0($20);	\
-	ldq	$0, 0($sp);	\
-	ldq	$1, 8($sp);	\
 99:;	\
	ldq	$19, 72($sp);	\
	ldq	$20, 80($sp);	\
@@ -316,7 +314,7 @@ ret_from_sys_call:
	cmovne	$26, 0, $19		/* $19 = 0 => non-restartable */
	ldq	$0, SP_OFF($sp)
	and	$0, 8, $0
-	beq	$0, restore_all
+	beq	$0, ret_to_kernel
 ret_to_user:
	/* Make sure need_resched and sigpending don't change between
	   sampling and the rti. */
@@ -329,6 +327,11 @@ restore_all:
	RESTORE_ALL
	call_pal PAL_rti
+
+ret_to_kernel:
+	lda	$16, 7
+	call_pal PAL_swpipl
+	br restore_all
 
	.align 3
 $syscall_error:
	/*
@@ -657,7 +660,7 @@ kernel_thread:
	/* We don't actually care for a3 success widgetry in the kernel.
	   Not for positive errno values. */
	stq	$0, 0($sp)		/* $0 */
-	br	restore_all
+	br	ret_to_kernel
	.end kernel_thread
 
	/*

@@ -356,7 +356,7 @@ dump_elf_thread(elf_greg_t *dest, struct pt_regs *pt, struct thread_info *ti)
	dest[27] = pt->r27;
	dest[28] = pt->r28;
	dest[29] = pt->gp;
-	dest[30] = rdusp();
+	dest[30] = ti == current_thread_info() ? rdusp() : ti->pcb.usp;
	dest[31] = pt->pc;
 
	/* Once upon a time this was the PS value. Which is stupid

@@ -157,7 +157,6 @@ typedef struct sigaltstack {
 #undef __HAVE_ARCH_SIG_BITOPS
 
 struct pt_regs;
-extern int do_signal(struct pt_regs *regs, sigset_t *oldset);
 
 #define ptrace_signal_deliver(regs, cookie)	do { } while (0)
 

@@ -351,6 +351,7 @@
 #define __ARCH_WANT_SYS_OLD_GETRLIMIT /*will be unused*/
 #define __ARCH_WANT_SYS_OLDUMOUNT
 #define __ARCH_WANT_SYS_RT_SIGACTION
+#define __ARCH_WANT_SYS_RT_SIGSUSPEND
 
 #define __IGNORE_lchown
 #define __IGNORE_setuid

@@ -235,10 +235,9 @@ work_resched:
 work_notifysig:				; deal with pending signals and
					; notify-resume requests
	mv	r0, sp			; arg1 : struct pt_regs *regs
-	ldi	r1, #0			; arg2 : sigset_t *oldset
-	mv	r2, r9			; arg3 : __u32 thread_info_flags
+	mv	r1, r9			; arg2 : __u32 thread_info_flags
	bl	do_notify_resume
-	bra	restore_all
+	bra	resume_userspace
 
	; perform syscall exit tracing
	ALIGN

@@ -592,16 +592,17 @@ void user_enable_single_step(struct task_struct *child)
 
		if (access_process_vm(child, pc&~3, &insn, sizeof(insn), 0)
		    != sizeof(insn))
-			break;
+			return -EIO;
 
		compute_next_pc(insn, pc, &next_pc, child);
		if (next_pc & 0x80000000)
-			break;
+			return -EIO;
 
		if (embed_debug_trap(child, next_pc))
-			break;
+			return -EIO;
 
	invalidate_cache();
+	return 0;
 }
 
 void user_disable_single_step(struct task_struct *child)

@@ -28,37 +28,6 @@
 
 #define DEBUG_SIG 0
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
-int do_signal(struct pt_regs *, sigset_t *);
-
-asmlinkage int
-sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize,
-		  unsigned long r2, unsigned long r3, unsigned long r4,
-		  unsigned long r5, unsigned long r6, struct pt_regs *regs)
-{
-	sigset_t newset;
-
-	/* XXX: Don't preclude handling different sized sigset_t's. */
-	if (sigsetsize != sizeof(sigset_t))
-		return -EINVAL;
-
-	if (copy_from_user(&newset, unewset, sizeof(newset)))
-		return -EFAULT;
-	sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
-
-	spin_lock_irq(&current->sighand->siglock);
-	current->saved_sigmask = current->blocked;
-	current->blocked = newset;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
-
-	current->state = TASK_INTERRUPTIBLE;
-	schedule();
-	set_thread_flag(TIF_RESTORE_SIGMASK);
-	return -ERESTARTNOHAND;
-}
-
 asmlinkage int
 sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
		unsigned long r2, unsigned long r3, unsigned long r4,
@@ -218,7 +187,7 @@ get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
	return (void __user *)((sp - frame_size) & -8ul);
 }
 
-static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
			   sigset_t *set, struct pt_regs *regs)
 {
	struct rt_sigframe __user *frame;
@@ -275,22 +244,34 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
		current->comm, current->pid, frame, regs->pc);
 #endif
 
-	return;
+	return 0;
 
 give_sigsegv:
	force_sigsegv(sig, current);
+	return -EFAULT;
 }
 
+static int prev_insn(struct pt_regs *regs)
+{
+	u16 inst;
+	if (get_user(&inst, (u16 __user *)(regs->bpc - 2)))
+		return -EFAULT;
+	if ((inst & 0xfff0) == 0x10f0)	/* trap ? */
+		regs->bpc -= 2;
+	else
+		regs->bpc -= 4;
+	regs->syscall_nr = -1;
+	return 0;
+}
+
 /*
  * OK, we're invoking a handler
  */
 
-static void
+static int
 handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
	      sigset_t *oldset, struct pt_regs *regs)
 {
-	unsigned short inst;
-
	/* Are we from a system call? */
	if (regs->syscall_nr >= 0) {
		/* If so, check system call restarting.. */
@@ -308,16 +289,14 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
			/* fallthrough */
		case -ERESTARTNOINTR:
			regs->r0 = regs->orig_r0;
-			inst = *(unsigned short *)(regs->bpc - 2);
-			if ((inst & 0xfff0) == 0x10f0)	/* trap ? */
-				regs->bpc -= 2;
-			else
-				regs->bpc -= 4;
+			if (prev_insn(regs) < 0)
+				return -EFAULT;
		}
	}
 
	/* Set up the stack frame */
-	setup_rt_frame(sig, ka, info, oldset, regs);
+	if (setup_rt_frame(sig, ka, info, oldset, regs))
+		return -EFAULT;
 
	spin_lock_irq(&current->sighand->siglock);
	sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
@@ -325,6 +304,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
	sigaddset(&current->blocked,sig);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
+	return 0;
 }
 
 /*
@@ -332,12 +312,12 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
  * want to handle. Thus you cannot kill init even with a SIGKILL even by
  * mistake.
  */
-int do_signal(struct pt_regs *regs, sigset_t *oldset)
+static void do_signal(struct pt_regs *regs)
 {
	siginfo_t info;
	int signr;
	struct k_sigaction ka;
-	unsigned short inst;
+	sigset_t *oldset;
 
	/*
	 * We want the common case to go fast, which
@@ -346,12 +326,14 @@ static void do_signal(struct pt_regs *regs)
	 * if so.
	 */
	if (!user_mode(regs))
-		return 1;
+		return;
 
	if (try_to_freeze())
		goto no_signal;
 
-	if (!oldset)
+	if (test_thread_flag(TIF_RESTORE_SIGMASK))
+		oldset = &current->saved_sigmask;
+	else
		oldset = &current->blocked;
 
	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
@@ -363,8 +345,10 @@ static void do_signal(struct pt_regs *regs)
		 */
 
		/* Whee! Actually deliver the signal. */
-		handle_signal(signr, &ka, &info, oldset, regs);
-		return 1;
+		if (handle_signal(signr, &ka, &info, oldset, regs) == 0)
+			clear_thread_flag(TIF_RESTORE_SIGMASK);
+
+		return;
	}
 
 no_signal:
@@ -375,31 +359,24 @@ static void do_signal(struct pt_regs *regs)
		    regs->r0 == -ERESTARTSYS ||
		    regs->r0 == -ERESTARTNOINTR) {
			regs->r0 = regs->orig_r0;
-			inst = *(unsigned short *)(regs->bpc - 2);
-			if ((inst & 0xfff0) == 0x10f0)	/* trap ? */
-				regs->bpc -= 2;
-			else
-				regs->bpc -= 4;
-		}
-		if (regs->r0 == -ERESTART_RESTARTBLOCK){
+			prev_insn(regs);
+		} else if (regs->r0 == -ERESTART_RESTARTBLOCK){
			regs->r0 = regs->orig_r0;
			regs->r7 = __NR_restart_syscall;
-			inst = *(unsigned short *)(regs->bpc - 2);
-			if ((inst & 0xfff0) == 0x10f0)	/* trap ? */
-				regs->bpc -= 2;
-			else
-				regs->bpc -= 4;
+			prev_insn(regs);
		}
	}
-	return 0;
+	if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
+		clear_thread_flag(TIF_RESTORE_SIGMASK);
+		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
+	}
 }
 
 /*
  * notification of userspace execution resumption
  * - triggered by current->work.notify_resume
  */
-void do_notify_resume(struct pt_regs *regs, sigset_t *oldset,
-		      __u32 thread_info_flags)
+void do_notify_resume(struct pt_regs *regs, __u32 thread_info_flags)
 {
	/* Pending single-step? */
	if (thread_info_flags & _TIF_SINGLESTEP)
@@ -407,7 +384,7 @@ void do_notify_resume(struct pt_regs *regs, __u32 thread_info_flags)
 
	/* deal with pending signal delivery */
	if (thread_info_flags & _TIF_SIGPENDING)
-		do_signal(regs,oldset);
+		do_signal(regs);
 
	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
		clear_thread_flag(TIF_NOTIFY_RESUME);

@@ -1506,13 +1506,6 @@ handle_ill:
	}
	STD_ENDPROC(handle_ill)
 
-	.pushsection .rodata, "a"
-	.align 8
-bpt_code:
-	bpt
-	ENDPROC(bpt_code)
-	.popsection
-
 /* Various stub interrupt handlers and syscall handlers */
 
 STD_ENTRY_LOCAL(_kernel_double_fault)

@@ -168,6 +168,7 @@
 #define X86_FEATURE_XSAVEOPT	(7*32+ 4) /* Optimized Xsave */
 #define X86_FEATURE_PLN		(7*32+ 5) /* Intel Power Limit Notification */
 #define X86_FEATURE_PTS		(7*32+ 6) /* Intel Package Thermal Status */
+#define X86_FEATURE_DTS		(7*32+ 7) /* Digital Thermal Sensor */
 
 /* Virtualization flags: Linux defined, word 8 */
 #define X86_FEATURE_TPR_SHADOW	(8*32+ 0) /* Intel TPR Shadow */

@@ -31,6 +31,7 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
	const struct cpuid_bit *cb;
 
	static const struct cpuid_bit __cpuinitconst cpuid_bits[] = {
+		{ X86_FEATURE_DTS,	CR_EAX, 0, 0x00000006, 0 },
		{ X86_FEATURE_IDA,	CR_EAX, 1, 0x00000006, 0 },
		{ X86_FEATURE_ARAT,	CR_EAX, 2, 0x00000006, 0 },
		{ X86_FEATURE_PLN,	CR_EAX, 4, 0x00000006, 0 },

@@ -361,6 +361,18 @@ static int attempt_merge(struct request_queue *q, struct request *req,
	if (!rq_mergeable(req) || !rq_mergeable(next))
		return 0;
 
+	/*
+	 * Don't merge file system requests and discard requests
+	 */
+	if ((req->cmd_flags & REQ_DISCARD) != (next->cmd_flags & REQ_DISCARD))
+		return 0;
+
+	/*
+	 * Don't merge discard requests and secure discard requests
+	 */
+	if ((req->cmd_flags & REQ_SECURE) != (next->cmd_flags & REQ_SECURE))
+		return 0;
+
	/*
	 * not contiguous
	 */

@@ -98,8 +98,8 @@ EXPORT_SYMBOL(drm_buffer_alloc);
  *   user_data: A pointer the data that is copied to the buffer.
  *   size: The Number of bytes to copy.
  */
-extern int drm_buffer_copy_from_user(struct drm_buffer *buf,
-		void __user *user_data, int size)
+int drm_buffer_copy_from_user(struct drm_buffer *buf,
+			      void __user *user_data, int size)
 {
	int nr_pages = size / PAGE_SIZE + 1;
	int idx;
@@ -163,7 +163,7 @@ void *drm_buffer_read_object(struct drm_buffer *buf,
 {
	int idx = drm_buffer_index(buf);
	int page = drm_buffer_page(buf);
-	void *obj = 0;
+	void *obj = NULL;
 
	if (idx + objsize <= PAGE_SIZE) {
		obj = &buf->data[page][idx];

@@ -142,7 +142,7 @@ int drm_gem_object_init(struct drm_device *dev,
		return -ENOMEM;
 
	kref_init(&obj->refcount);
-	kref_init(&obj->handlecount);
+	atomic_set(&obj->handle_count, 0);
	obj->size = size;
 
	return 0;
@@ -448,26 +448,6 @@ drm_gem_object_free(struct kref *kref)
 }
 EXPORT_SYMBOL(drm_gem_object_free);
 
-/**
- * Called after the last reference to the object has been lost.
- * Must be called without holding struct_mutex
- *
- * Frees the object
- */
-void
-drm_gem_object_free_unlocked(struct kref *kref)
-{
-	struct drm_gem_object *obj = (struct drm_gem_object *) kref;
-	struct drm_device *dev = obj->dev;
-
-	if (dev->driver->gem_free_object != NULL) {
-		mutex_lock(&dev->struct_mutex);
-		dev->driver->gem_free_object(obj);
-		mutex_unlock(&dev->struct_mutex);
-	}
-}
-EXPORT_SYMBOL(drm_gem_object_free_unlocked);
-
 static void drm_gem_object_ref_bug(struct kref *list_kref)
 {
	BUG();
@@ -480,12 +460,8 @@ static void drm_gem_object_ref_bug(struct kref *list_kref)
  * called before drm_gem_object_free or we'll be touching
  * freed memory
  */
-void
-drm_gem_object_handle_free(struct kref *kref)
+void drm_gem_object_handle_free(struct drm_gem_object *obj)
 {
-	struct drm_gem_object *obj = container_of(kref,
-						  struct drm_gem_object,
-						  handlecount);
	struct drm_device *dev = obj->dev;
 
	/* Remove any name for this object */
@@ -512,6 +488,10 @@ void drm_gem_vm_open(struct vm_area_struct *vma)
	struct drm_gem_object *obj = vma->vm_private_data;
 
	drm_gem_object_reference(obj);
+
+	mutex_lock(&obj->dev->struct_mutex);
+	drm_vm_open_locked(vma);
+	mutex_unlock(&obj->dev->struct_mutex);
 }
 EXPORT_SYMBOL(drm_gem_vm_open);
 
@@ -519,7 +499,10 @@ void drm_gem_vm_close(struct vm_area_struct *vma)
 {
	struct drm_gem_object *obj = vma->vm_private_data;
 
-	drm_gem_object_unreference_unlocked(obj);
+	mutex_lock(&obj->dev->struct_mutex);
+	drm_vm_close_locked(vma);
+	drm_gem_object_unreference(obj);
+	mutex_unlock(&obj->dev->struct_mutex);
 }
 EXPORT_SYMBOL(drm_gem_vm_close);
 

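A pattern worth calling out before the follow-on hunks: the GEM rework above turns obj->handlecount from a kref into a plain atomic counter, because a handle is not a lifetime reference — each handle holds an ordinary object reference of its own, and the create ioctls now drop their allocation reference unconditionally once drm_gem_handle_create() has taken over (the i915/nouveau/radeon hunks below all repeat the same "drop reference from allocate - handle holds it now" step). A rough standalone sketch of that reference flow, using hypothetical simplified types rather than the kernel's:

#include <stdatomic.h>
#include <stdlib.h>

struct gem_object {
	atomic_int refcount;     /* lifetime: free the object when it drops to 0 */
	atomic_int handle_count; /* bookkeeping only, never keeps the object alive */
};

static void obj_unref(struct gem_object *obj)
{
	if (atomic_fetch_sub(&obj->refcount, 1) == 1)
		free(obj);                        /* last reference gone */
}

/* The handle takes its own lifetime reference on creation. */
static int handle_create(struct gem_object *obj)
{
	atomic_fetch_add(&obj->refcount, 1);
	atomic_fetch_add(&obj->handle_count, 1);
	return 0;                                 /* would hand back a handle id */
}

int create_ioctl(void)
{
	struct gem_object *obj = calloc(1, sizeof(*obj));
	if (!obj)
		return -1;
	atomic_init(&obj->refcount, 1);           /* allocation reference */

	int ret = handle_create(obj);
	/* drop reference from allocate - handle holds it now */
	obj_unref(obj);
	return ret;
}

Either way the ioctl's own reference is dropped exactly once; the unconditional unreference is the leak-and-race fix the hunks around here converge on.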
@@ -255,7 +255,7 @@ int drm_gem_one_name_info(int id, void *ptr, void *data)
 
	seq_printf(m, "%6d %8zd %7d %8d\n",
		   obj->name, obj->size,
-		   atomic_read(&obj->handlecount.refcount),
+		   atomic_read(&obj->handle_count),
		   atomic_read(&obj->refcount.refcount));
	return 0;
 }

@@ -433,6 +433,25 @@ static void drm_vm_open(struct vm_area_struct *vma)
	mutex_unlock(&dev->struct_mutex);
 }
 
+void drm_vm_close_locked(struct vm_area_struct *vma)
+{
+	struct drm_file *priv = vma->vm_file->private_data;
+	struct drm_device *dev = priv->minor->dev;
+	struct drm_vma_entry *pt, *temp;
+
+	DRM_DEBUG("0x%08lx,0x%08lx\n",
+		  vma->vm_start, vma->vm_end - vma->vm_start);
+	atomic_dec(&dev->vma_count);
+
+	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
+		if (pt->vma == vma) {
+			list_del(&pt->head);
+			kfree(pt);
+			break;
+		}
+	}
+}
+
 /**
  * \c close method for all virtual memory types.
  *
@@ -445,20 +464,9 @@ static void drm_vm_close(struct vm_area_struct *vma)
 {
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
-	struct drm_vma_entry *pt, *temp;
 
-	DRM_DEBUG("0x%08lx,0x%08lx\n",
-		  vma->vm_start, vma->vm_end - vma->vm_start);
-	atomic_dec(&dev->vma_count);
-
	mutex_lock(&dev->struct_mutex);
-	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
-		if (pt->vma == vma) {
-			list_del(&pt->head);
-			kfree(pt);
-			break;
-		}
-	}
+	drm_vm_close_locked(vma);
	mutex_unlock(&dev->struct_mutex);
 }
 

@@ -116,7 +116,7 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
 static const struct file_operations i810_buffer_fops = {
	.open = drm_open,
	.release = drm_release,
-	.unlocked_ioctl = drm_ioctl,
+	.unlocked_ioctl = i810_ioctl,
	.mmap = i810_mmap_buffers,
	.fasync = drm_fasync,
 };

@@ -118,7 +118,7 @@ static int i830_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
 static const struct file_operations i830_buffer_fops = {
	.open = drm_open,
	.release = drm_release,
-	.unlocked_ioctl = drm_ioctl,
+	.unlocked_ioctl = i830_ioctl,
	.mmap = i830_mmap_buffers,
	.fasync = drm_fasync,
 };

@@ -244,14 +244,12 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
		return -ENOMEM;
 
	ret = drm_gem_handle_create(file_priv, obj, &handle);
+	/* drop reference from allocate - handle holds it now */
+	drm_gem_object_unreference_unlocked(obj);
	if (ret) {
-		drm_gem_object_unreference_unlocked(obj);
		return ret;
	}
 
-	/* Sink the floating reference from kref_init(handlecount) */
-	drm_gem_object_handle_unreference_unlocked(obj);
-
	args->handle = handle;
	return 0;
 }

@@ -224,8 +224,10 @@ static void intel_fbdev_destroy(struct drm_device *dev,
	drm_fb_helper_fini(&ifbdev->helper);
 
	drm_framebuffer_cleanup(&ifb->base);
-	if (ifb->obj)
+	if (ifb->obj) {
+		drm_gem_object_handle_unreference_unlocked(ifb->obj);
		drm_gem_object_unreference_unlocked(ifb->obj);
+	}
 }
 
 int intel_fbdev_init(struct drm_device *dev)

@@ -558,8 +558,10 @@ nouveau_connector_get_modes(struct drm_connector *connector)
	if (nv_encoder->dcb->type == OUTPUT_LVDS &&
	    (nv_encoder->dcb->lvdsconf.use_straps_for_mode ||
	     dev_priv->vbios.fp_no_ddc) && nouveau_bios_fp_mode(dev, NULL)) {
-		nv_connector->native_mode = drm_mode_create(dev);
-		nouveau_bios_fp_mode(dev, nv_connector->native_mode);
+		struct drm_display_mode mode;
+
+		nouveau_bios_fp_mode(dev, &mode);
+		nv_connector->native_mode = drm_mode_duplicate(dev, &mode);
	}
 
	/* Find the native mode if this is a digital panel, if we didn't

@@ -352,6 +352,7 @@ nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *nfbdev)
 
	if (nouveau_fb->nvbo) {
		nouveau_bo_unmap(nouveau_fb->nvbo);
+		drm_gem_object_handle_unreference_unlocked(nouveau_fb->nvbo->gem);
		drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem);
		nouveau_fb->nvbo = NULL;
	}

@@ -167,11 +167,9 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
		goto out;
 
	ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
+	/* drop reference from allocate - handle holds it now */
+	drm_gem_object_unreference_unlocked(nvbo->gem);
 out:
-	drm_gem_object_handle_unreference_unlocked(nvbo->gem);
-
-	if (ret)
-		drm_gem_object_unreference_unlocked(nvbo->gem);
	return ret;
 }
 

@@ -79,6 +79,7 @@ nouveau_notifier_takedown_channel(struct nouveau_channel *chan)
	mutex_lock(&dev->struct_mutex);
	nouveau_bo_unpin(chan->notifier_bo);
	mutex_unlock(&dev->struct_mutex);
+	drm_gem_object_handle_unreference_unlocked(chan->notifier_bo->gem);
	drm_gem_object_unreference_unlocked(chan->notifier_bo->gem);
	drm_mm_takedown(&chan->notifier_heap);
 }

@@ -4999,7 +4999,7 @@ typedef struct _SW_I2C_IO_DATA_PARAMETERS
 #define SW_I2C_CNTL_WRITE1BIT 6
 
 //==============================VESA definition Portion===============================
-#define VESA_OEM_PRODUCT_REV			'01.00'
+#define VESA_OEM_PRODUCT_REV			"01.00"
 #define VESA_MODE_ATTRIBUTE_MODE_SUPPORT	0xBB	//refer to VBE spec p.32, no TTY support
 #define VESA_MODE_WIN_ATTRIBUTE		7
 #define VESA_WIN_SIZE			64

@@ -2729,7 +2729,7 @@ int r600_ib_test(struct radeon_device *rdev)
	if (i < rdev->usec_timeout) {
		DRM_INFO("ib test succeeded in %u usecs\n", i);
	} else {
-		DRM_ERROR("radeon: ib test failed (sracth(0x%04X)=0x%08X)\n",
+		DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
@@ -3528,7 +3528,8 @@ void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
	/* r7xx hw bug.  write to HDP_DEBUG1 followed by fb read
	 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL
	 */
-	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740)) {
+	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
+	    rdev->vram_scratch.ptr) {
		void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
		u32 tmp;
 

@@ -317,6 +317,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
			*connector_type = DRM_MODE_CONNECTOR_DVID;
	}
 
+	/* MSI K9A2GM V2/V3 board has no HDMI or DVI */
+	if ((dev->pdev->device == 0x796e) &&
+	    (dev->pdev->subsystem_vendor == 0x1462) &&
+	    (dev->pdev->subsystem_device == 0x7302)) {
+		if ((supported_device == ATOM_DEVICE_DFP2_SUPPORT) ||
+		    (supported_device == ATOM_DEVICE_DFP3_SUPPORT))
+			return false;
+	}
+
	/* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */
	if ((dev->pdev->device == 0x7941) &&
	    (dev->pdev->subsystem_vendor == 0x147b) &&

@@ -349,6 +349,8 @@ static void radeon_print_display_setup(struct drm_device *dev)
			DRM_INFO("  DFP4: %s\n", encoder_names[radeon_encoder->encoder_id]);
		if (devices & ATOM_DEVICE_DFP5_SUPPORT)
			DRM_INFO("  DFP5: %s\n", encoder_names[radeon_encoder->encoder_id]);
+		if (devices & ATOM_DEVICE_DFP6_SUPPORT)
+			DRM_INFO("  DFP6: %s\n", encoder_names[radeon_encoder->encoder_id]);
		if (devices & ATOM_DEVICE_TV1_SUPPORT)
			DRM_INFO("  TV1: %s\n", encoder_names[radeon_encoder->encoder_id]);
		if (devices & ATOM_DEVICE_CV_SUPPORT)
@@ -841,8 +843,9 @@ static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
 {
	struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
 
-	if (radeon_fb->obj)
+	if (radeon_fb->obj) {
		drm_gem_object_unreference_unlocked(radeon_fb->obj);
+	}
	drm_framebuffer_cleanup(fb);
	kfree(radeon_fb);
 }

@@ -94,8 +94,10 @@ static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj)
	ret = radeon_bo_reserve(rbo, false);
	if (likely(ret == 0)) {
		radeon_bo_kunmap(rbo);
+		radeon_bo_unpin(rbo);
		radeon_bo_unreserve(rbo);
	}
+	drm_gem_object_handle_unreference(gobj);
	drm_gem_object_unreference_unlocked(gobj);
 }
 
@@ -325,8 +327,6 @@ static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfbdev)
 {
	struct fb_info *info;
	struct radeon_framebuffer *rfb = &rfbdev->rfb;
-	struct radeon_bo *rbo;
-	int r;
 
	if (rfbdev->helper.fbdev) {
		info = rfbdev->helper.fbdev;
@@ -338,14 +338,8 @@ static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfbdev)
	}
 
	if (rfb->obj) {
-		rbo = rfb->obj->driver_private;
-		r = radeon_bo_reserve(rbo, false);
-		if (likely(r == 0)) {
-			radeon_bo_kunmap(rbo);
-			radeon_bo_unpin(rbo);
-			radeon_bo_unreserve(rbo);
-		}
-		drm_gem_object_unreference_unlocked(rfb->obj);
+		radeonfb_destroy_pinned_object(rfb->obj);
		rfb->obj = NULL;
	}
	drm_fb_helper_fini(&rfbdev->helper);
	drm_framebuffer_cleanup(&rfb->base);

@@ -201,11 +201,11 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
		return r;
	}
	r = drm_gem_handle_create(filp, gobj, &handle);
+	/* drop reference from allocate - handle holds it now */
+	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
-		drm_gem_object_unreference_unlocked(gobj);
		return r;
	}
-	drm_gem_object_handle_unreference_unlocked(gobj);
	args->handle = handle;
	return 0;
 }

@@ -203,6 +203,10 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
  */
 int radeon_driver_firstopen_kms(struct drm_device *dev)
 {
+	struct radeon_device *rdev = dev->dev_private;
+
+	if (rdev->powered_down)
+		return -EINVAL;
	return 0;
 }
 

@@ -351,6 +351,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	fbo->vm_node = NULL;
+	atomic_set(&fbo->cpu_writers, 0);
 
	fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
	kref_init(&fbo->list_kref);

@@ -69,7 +69,7 @@ struct ttm_page_pool {
	spinlock_t		lock;
	bool			fill_lock;
	struct list_head	list;
-	int			gfp_flags;
+	gfp_t			gfp_flags;
	unsigned		npages;
	char			*name;
	unsigned long		nfrees;
@@ -475,7 +475,7 @@ static void ttm_handle_caching_state_failure(struct list_head *pages,
  * This function is reentrant if caller updates count depending on number of
  * pages returned in pages array.
  */
-static int ttm_alloc_new_pages(struct list_head *pages, int gfp_flags,
+static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
		int ttm_flags, enum ttm_caching_state cstate, unsigned count)
 {
	struct page **caching_array;
@@ -666,7 +666,7 @@ int ttm_get_pages(struct list_head *pages, int flags,
 {
	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
	struct page *p = NULL;
-	int gfp_flags = GFP_USER;
+	gfp_t gfp_flags = GFP_USER;
	int r;
 
	/* set zero flag for page allocation if required */
@@ -818,7 +818,7 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
	return 0;
 }
 
-void ttm_page_alloc_fini()
+void ttm_page_alloc_fini(void)
 {
	int i;
 

@@ -148,13 +148,16 @@ static struct pci_device_id vmw_pci_id_list[] = {
	{0, 0, 0}
 };
 
-static char *vmw_devname = "vmwgfx";
+static int enable_fbdev;
 
 static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
 static void vmw_master_init(struct vmw_master *);
 static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr);
 
+MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
+module_param_named(enable_fbdev, enable_fbdev, int, 0600);
+
 static void vmw_print_capabilities(uint32_t capabilities)
 {
	DRM_INFO("Capabilities:\n");
@@ -192,8 +195,6 @@ static int vmw_request_device(struct vmw_private *dev_priv)
 {
	int ret;
 
-	vmw_kms_save_vga(dev_priv);
-
	ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to initialize FIFO.\n");
@@ -206,9 +207,35 @@ static int vmw_request_device(struct vmw_private *dev_priv)
 static void vmw_release_device(struct vmw_private *dev_priv)
 {
	vmw_fifo_release(dev_priv, &dev_priv->fifo);
-	vmw_kms_restore_vga(dev_priv);
 }
 
+int vmw_3d_resource_inc(struct vmw_private *dev_priv)
+{
+	int ret = 0;
+
+	mutex_lock(&dev_priv->release_mutex);
+	if (unlikely(dev_priv->num_3d_resources++ == 0)) {
+		ret = vmw_request_device(dev_priv);
+		if (unlikely(ret != 0))
+			--dev_priv->num_3d_resources;
+	}
+	mutex_unlock(&dev_priv->release_mutex);
+	return ret;
+}
+
+
+void vmw_3d_resource_dec(struct vmw_private *dev_priv)
+{
+	int32_t n3d;
+
+	mutex_lock(&dev_priv->release_mutex);
+	if (unlikely(--dev_priv->num_3d_resources == 0))
+		vmw_release_device(dev_priv);
+	n3d = (int32_t) dev_priv->num_3d_resources;
+	mutex_unlock(&dev_priv->release_mutex);
+
+	BUG_ON(n3d < 0);
+}
+
 static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 {
@@ -228,6 +255,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
	dev_priv->last_read_sequence = (uint32_t) -100;
	mutex_init(&dev_priv->hw_mutex);
	mutex_init(&dev_priv->cmdbuf_mutex);
+	mutex_init(&dev_priv->release_mutex);
	rwlock_init(&dev_priv->resource_lock);
	idr_init(&dev_priv->context_idr);
	idr_init(&dev_priv->surface_idr);
@@ -244,6 +272,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
	dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);
 
+	dev_priv->enable_fb = enable_fbdev;
+
	mutex_lock(&dev_priv->hw_mutex);
 
	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
@@ -343,17 +373,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 
	dev->dev_private = dev_priv;
 
-	if (!dev->devname)
-		dev->devname = vmw_devname;
-
-	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
-		ret = drm_irq_install(dev);
-		if (unlikely(ret != 0)) {
-			DRM_ERROR("Failed installing irq: %d\n", ret);
-			goto out_no_irq;
-		}
-	}
-
	ret = pci_request_regions(dev->pdev, "vmwgfx probe");
	dev_priv->stealth = (ret != 0);
	if (dev_priv->stealth) {
@@ -369,26 +388,52 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
			goto out_no_device;
		}
	}
-	ret = vmw_request_device(dev_priv);
+	ret = vmw_kms_init(dev_priv);
	if (unlikely(ret != 0))
-		goto out_no_device;
-	vmw_kms_init(dev_priv);
+		goto out_no_kms;
	vmw_overlay_init(dev_priv);
-	vmw_fb_init(dev_priv);
+	if (dev_priv->enable_fb) {
+		ret = vmw_3d_resource_inc(dev_priv);
+		if (unlikely(ret != 0))
+			goto out_no_fifo;
+		vmw_kms_save_vga(dev_priv);
+		vmw_fb_init(dev_priv);
+		DRM_INFO("%s", vmw_fifo_have_3d(dev_priv) ?
			 "Detected device 3D availability.\n" :
			 "Detected no device 3D availability.\n");
+	} else {
+		DRM_INFO("Delayed 3D detection since we're not "
			 "running the device in SVGA mode yet.\n");
+	}
+
+	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
+		ret = drm_irq_install(dev);
+		if (unlikely(ret != 0)) {
+			DRM_ERROR("Failed installing irq: %d\n", ret);
+			goto out_no_irq;
+		}
+	}
 
	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
	register_pm_notifier(&dev_priv->pm_nb);
 
-	DRM_INFO("%s", vmw_fifo_have_3d(dev_priv) ? "Have 3D\n" : "No 3D\n");
-
	return 0;
 
-out_no_device:
-	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
-		drm_irq_uninstall(dev_priv->dev);
-	if (dev->devname == vmw_devname)
-		dev->devname = NULL;
 out_no_irq:
+	if (dev_priv->enable_fb) {
+		vmw_fb_close(dev_priv);
+		vmw_kms_restore_vga(dev_priv);
+		vmw_3d_resource_dec(dev_priv);
+	}
+out_no_fifo:
+	vmw_overlay_close(dev_priv);
+	vmw_kms_close(dev_priv);
+out_no_kms:
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);
+out_no_device:
	ttm_object_device_release(&dev_priv->tdev);
 out_err4:
	iounmap(dev_priv->mmio_virt);
@@ -415,19 +460,20 @@ static int vmw_driver_unload(struct drm_device *dev)
 
	unregister_pm_notifier(&dev_priv->pm_nb);
 
-	vmw_fb_close(dev_priv);
+	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
+		drm_irq_uninstall(dev_priv->dev);
+	if (dev_priv->enable_fb) {
+		vmw_fb_close(dev_priv);
+		vmw_kms_restore_vga(dev_priv);
+		vmw_3d_resource_dec(dev_priv);
+	}
	vmw_kms_close(dev_priv);
	vmw_overlay_close(dev_priv);
-	vmw_release_device(dev_priv);
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);
 
-	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
-		drm_irq_uninstall(dev_priv->dev);
-	if (dev->devname == vmw_devname)
-		dev->devname = NULL;
	ttm_object_device_release(&dev_priv->tdev);
	iounmap(dev_priv->mmio_virt);
	drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
@@ -500,7 +546,7 @@ static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
		struct drm_ioctl_desc *ioctl =
			&vmw_ioctls[nr - DRM_COMMAND_BASE];
 
-		if (unlikely(ioctl->cmd != cmd)) {
+		if (unlikely(ioctl->cmd_drv != cmd)) {
			DRM_ERROR("Invalid command format, ioctl %d\n",
				  nr - DRM_COMMAND_BASE);
			return -EINVAL;
@@ -589,6 +635,16 @@ static int vmw_master_set(struct drm_device *dev,
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret = 0;
 
+	if (!dev_priv->enable_fb) {
+		ret = vmw_3d_resource_inc(dev_priv);
+		if (unlikely(ret != 0))
+			return ret;
+		vmw_kms_save_vga(dev_priv);
+		mutex_lock(&dev_priv->hw_mutex);
+		vmw_write(dev_priv, SVGA_REG_TRACES, 0);
+		mutex_unlock(&dev_priv->hw_mutex);
+	}
+
	if (active) {
		BUG_ON(active != &dev_priv->fbdev_master);
		ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
@@ -617,7 +673,13 @@ static int vmw_master_set(struct drm_device *dev,
	return 0;
 
 out_no_active_lock:
-	vmw_release_device(dev_priv);
+	if (!dev_priv->enable_fb) {
+		mutex_lock(&dev_priv->hw_mutex);
+		vmw_write(dev_priv, SVGA_REG_TRACES, 1);
+		mutex_unlock(&dev_priv->hw_mutex);
+		vmw_kms_restore_vga(dev_priv);
+		vmw_3d_resource_dec(dev_priv);
+	}
	return ret;
 }
 
@@ -645,11 +707,23 @@ static void vmw_master_drop(struct drm_device *dev,
 
	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
 
+	if (!dev_priv->enable_fb) {
+		ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
+		if (unlikely(ret != 0))
+			DRM_ERROR("Unable to clean VRAM on master drop.\n");
+		mutex_lock(&dev_priv->hw_mutex);
+		vmw_write(dev_priv, SVGA_REG_TRACES, 1);
+		mutex_unlock(&dev_priv->hw_mutex);
+		vmw_kms_restore_vga(dev_priv);
+		vmw_3d_resource_dec(dev_priv);
+	}
+
	dev_priv->active_master = &dev_priv->fbdev_master;
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	ttm_vt_unlock(&dev_priv->fbdev_master.lock);
 
-	vmw_fb_on(dev_priv);
+	if (dev_priv->enable_fb)
+		vmw_fb_on(dev_priv);
 }
 
 
@@ -722,6 +796,7 @@ static struct drm_driver driver = {
	.irq_postinstall = vmw_irq_postinstall,
	.irq_uninstall = vmw_irq_uninstall,
	.irq_handler = vmw_irq_handler,
+	.get_vblank_counter = vmw_get_vblank_counter,
	.reclaim_buffers_locked = NULL,
	.ioctls = vmw_ioctls,
	.num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls),

@@ -277,6 +277,7 @@ struct vmw_private {
 
	bool stealth;
	bool is_opened;
+	bool enable_fb;
 
	/**
	 * Master management.
@@ -285,6 +286,9 @@ struct vmw_private {
	struct vmw_master *active_master;
	struct vmw_master fbdev_master;
	struct notifier_block pm_nb;
+
+	struct mutex release_mutex;
+	uint32_t num_3d_resources;
 };
 
 static inline struct vmw_private *vmw_priv(struct drm_device *dev)
@@ -319,6 +323,9 @@ static inline uint32_t vmw_read(struct vmw_private *dev_priv,
	return val;
 }
 
+int vmw_3d_resource_inc(struct vmw_private *dev_priv);
+void vmw_3d_resource_dec(struct vmw_private *dev_priv);
+
 /**
  * GMR utilities - vmwgfx_gmr.c
  */
@@ -511,6 +518,7 @@ void vmw_kms_write_svga(struct vmw_private *vmw_priv,
			unsigned bbp, unsigned depth);
 int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
+u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc);
 
 /**
  * Overlay control - vmwgfx_overlay.c

@@ -615,6 +615,11 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
	if (unlikely(ret != 0))
		goto err_unlock;
 
+	if (bo->mem.mem_type == TTM_PL_VRAM &&
+	    bo->mem.mm_node->start < bo->num_pages)
+		(void) ttm_bo_validate(bo, &vmw_sys_placement, false,
+				       false, false);
+
	ret = ttm_bo_validate(bo, &ne_placement, false, false, false);
 
	/* Could probably bug on */

@@ -106,6 +106,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
	mutex_lock(&dev_priv->hw_mutex);
	dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
	dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
+	dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);
	vmw_write(dev_priv, SVGA_REG_ENABLE, 1);
 
	min = 4;
@@ -175,6 +176,8 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
		  dev_priv->config_done_state);
	vmw_write(dev_priv, SVGA_REG_ENABLE,
		  dev_priv->enable_state);
+	vmw_write(dev_priv, SVGA_REG_TRACES,
+		  dev_priv->traces_state);
 
	mutex_unlock(&dev_priv->hw_mutex);
	vmw_fence_queue_takedown(&fifo->fence_queue);

@@ -898,7 +898,19 @@ int vmw_kms_save_vga(struct vmw_private *vmw_priv)
		save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH);
		save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
+		if (i == 0 && vmw_priv->num_displays == 1 &&
+		    save->width == 0 && save->height == 0) {
+
+			/*
+			 * It should be fairly safe to assume that these
+			 * values are uninitialized.
+			 */
+
+			save->width = vmw_priv->vga_width - save->pos_x;
+			save->height = vmw_priv->vga_height - save->pos_y;
+		}
	}
 
	return 0;
 }
 
@@ -984,3 +996,8 @@ out_unlock:
	ttm_read_unlock(&vmaster->lock);
	return ret;
 }
+
+u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc)
+{
+	return 0;
+}

@@ -27,6 +27,8 @@
 
 #include "vmwgfx_kms.h"
 
+#define VMWGFX_LDU_NUM_DU 8
+
 #define vmw_crtc_to_ldu(x) \
	container_of(x, struct vmw_legacy_display_unit, base.crtc)
 #define vmw_encoder_to_ldu(x) \
@@ -536,6 +538,10 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
 
 int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
 {
+	struct drm_device *dev = dev_priv->dev;
+	int i;
+	int ret;
+
	if (dev_priv->ldu_priv) {
		DRM_INFO("ldu system already on\n");
		return -EINVAL;
@@ -553,23 +559,24 @@ int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
 
	drm_mode_create_dirty_info_property(dev_priv->dev);
 
-	vmw_ldu_init(dev_priv, 0);
-	/* for old hardware without multimon only enable one display */
	if (dev_priv->capabilities & SVGA_CAP_MULTIMON) {
-		vmw_ldu_init(dev_priv, 1);
-		vmw_ldu_init(dev_priv, 2);
-		vmw_ldu_init(dev_priv, 3);
-		vmw_ldu_init(dev_priv, 4);
-		vmw_ldu_init(dev_priv, 5);
-		vmw_ldu_init(dev_priv, 6);
-		vmw_ldu_init(dev_priv, 7);
+		for (i = 0; i < VMWGFX_LDU_NUM_DU; ++i)
+			vmw_ldu_init(dev_priv, i);
+		ret = drm_vblank_init(dev, VMWGFX_LDU_NUM_DU);
+	} else {
+		/* for old hardware without multimon only enable one display */
+		vmw_ldu_init(dev_priv, 0);
+		ret = drm_vblank_init(dev, 1);
	}
 
-	return 0;
+	return ret;
 }
 
 int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv)
 {
	struct drm_device *dev = dev_priv->dev;
 
+	drm_vblank_cleanup(dev);
	if (!dev_priv->ldu_priv)
		return -ENOSYS;
 

@@ -211,6 +211,7 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
	cmd->body.cid = cpu_to_le32(res->id);
 
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	vmw_3d_resource_dec(dev_priv);
 }
 
 static int vmw_context_init(struct vmw_private *dev_priv,
@@ -247,6 +248,7 @@ static int vmw_context_init(struct vmw_private *dev_priv,
	cmd->body.cid = cpu_to_le32(res->id);
 
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	(void) vmw_3d_resource_inc(dev_priv);
	vmw_resource_activate(res, vmw_hw_context_destroy);
	return 0;
 }
@@ -406,6 +408,7 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res)
	cmd->body.sid = cpu_to_le32(res->id);
 
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	vmw_3d_resource_dec(dev_priv);
 }
 
 void vmw_surface_res_free(struct vmw_resource *res)
@@ -473,6 +476,7 @@ int vmw_surface_init(struct vmw_private *dev_priv,
	}
 
	vmw_fifo_commit(dev_priv, submit_size);
+	(void) vmw_3d_resource_inc(dev_priv);
	vmw_resource_activate(res, vmw_hw_surface_destroy);
	return 0;
 }

@@ -598,7 +598,7 @@ static inline void vga_update_device_decodes(struct vga_device *vgadev,
	pr_debug("vgaarb: decoding count now is: %d\n", vga_decode_count);
 }
 
-void __vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes, bool userspace)
+static void __vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes, bool userspace)
 {
	struct vga_device *vgadev;
	unsigned long flags;

@@ -409,7 +409,7 @@ config SENSORS_CORETEMP
 
 config SENSORS_PKGTEMP
	tristate "Intel processor package temperature sensor"
-	depends on X86 && PCI && EXPERIMENTAL
+	depends on X86 && EXPERIMENTAL
	help
	  If you say yes here you get support for the package level temperature
	  sensor inside your CPU. Check documentation/driver for details.

@@ -423,9 +423,18 @@ static int __cpuinit coretemp_device_add(unsigned int cpu)
	int err;
	struct platform_device *pdev;
	struct pdev_entry *pdev_entry;
-#ifdef CONFIG_SMP
	struct cpuinfo_x86 *c = &cpu_data(cpu);
-#endif
+
+	/*
+	 * CPUID.06H.EAX[0] indicates whether the CPU has thermal
+	 * sensors. We check this bit only, all the early CPUs
+	 * without thermal sensors will be filtered out.
+	 */
+	if (!cpu_has(c, X86_FEATURE_DTS)) {
+		printk(KERN_INFO DRVNAME ": CPU (model=0x%x)"
+		       " has no thermal sensor.\n", c->x86_model);
+		return 0;
+	}
 
	mutex_lock(&pdev_list_mutex);
 
@@ -482,14 +491,22 @@ exit:
 
 static void coretemp_device_remove(unsigned int cpu)
 {
-	struct pdev_entry *p, *n;
+	struct pdev_entry *p;
+	unsigned int i;
 
	mutex_lock(&pdev_list_mutex);
-	list_for_each_entry_safe(p, n, &pdev_list, list) {
-		if (p->cpu == cpu) {
-			platform_device_unregister(p->pdev);
-			list_del(&p->list);
-			kfree(p);
-		}
+	list_for_each_entry(p, &pdev_list, list) {
+		if (p->cpu != cpu)
+			continue;
+
+		platform_device_unregister(p->pdev);
+		list_del(&p->list);
+		mutex_unlock(&pdev_list_mutex);
+		kfree(p);
+		for_each_cpu(i, cpu_sibling_mask(cpu))
+			if (i != cpu && !coretemp_device_add(i))
+				break;
+		return;
	}
	mutex_unlock(&pdev_list_mutex);
 }
@@ -527,30 +544,21 @@ static int __init coretemp_init(void)
	if (err)
		goto exit;
 
-	for_each_online_cpu(i) {
-		struct cpuinfo_x86 *c = &cpu_data(i);
-		/*
-		 * CPUID.06H.EAX[0] indicates whether the CPU has thermal
-		 * sensors. We check this bit only, all the early CPUs
-		 * without thermal sensors will be filtered out.
-		 */
-		if (c->cpuid_level >= 6 && (cpuid_eax(0x06) & 0x01))
-			coretemp_device_add(i);
-		else {
-			printk(KERN_INFO DRVNAME ": CPU (model=0x%x)"
-				" has no thermal sensor.\n", c->x86_model);
-		}
-	}
+	for_each_online_cpu(i)
+		coretemp_device_add(i);
+
+#ifndef CONFIG_HOTPLUG_CPU
	if (list_empty(&pdev_list)) {
		err = -ENODEV;
		goto exit_driver_unreg;
	}
+#endif
 
	register_hotcpu_notifier(&coretemp_cpu_notifier);
	return 0;
 
-exit_driver_unreg:
+#ifndef CONFIG_HOTPLUG_CPU
+exit_driver_unreg:
	platform_driver_unregister(&coretemp_driver);
+#endif
 exit:

@@ -277,7 +277,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *dummy)
	wake_up_interruptible(&lis3_dev.misc_wait);
	kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
 out:
-	if (lis3_dev.whoami == WAI_8B && lis3_dev.idev &&
+	if (lis3_dev.pdata && lis3_dev.whoami == WAI_8B && lis3_dev.idev &&
	    lis3_dev.idev->input->users)
		return IRQ_WAKE_THREAD;
	return IRQ_HANDLED;
@@ -718,7 +718,7 @@ int lis3lv02d_init_device(struct lis3lv02d *dev)
	 * io-apic is not configurable (and generates a warning) but I keep it
	 * in case of support for other hardware.
	 */
-	if (dev->whoami == WAI_8B)
+	if (dev->pdata && dev->whoami == WAI_8B)
		thread_fn = lis302dl_interrupt_thread1_8b;
	else
		thread_fn = NULL;

@@ -33,7 +33,6 @@
 #include <linux/list.h>
 #include <linux/platform_device.h>
 #include <linux/cpu.h>
-#include <linux/pci.h>
 #include <asm/msr.h>
 #include <asm/processor.h>
 
@@ -224,7 +223,7 @@ static int __devinit pkgtemp_probe(struct platform_device *pdev)
 
	err = sysfs_create_group(&pdev->dev.kobj, &pkgtemp_group);
	if (err)
-		goto exit_free;
+		goto exit_dev;
 
	data->hwmon_dev = hwmon_device_register(&pdev->dev);
	if (IS_ERR(data->hwmon_dev)) {
@@ -238,6 +237,8 @@ static int __devinit pkgtemp_probe(struct platform_device *pdev)
 
 exit_class:
	sysfs_remove_group(&pdev->dev.kobj, &pkgtemp_group);
+exit_dev:
+	device_remove_file(&pdev->dev, &sensor_dev_attr_temp1_max.dev_attr);
 exit_free:
	kfree(data);
 exit:
@@ -250,6 +251,7 @@ static int __devexit pkgtemp_remove(struct platform_device *pdev)
 
	hwmon_device_unregister(data->hwmon_dev);
	sysfs_remove_group(&pdev->dev.kobj, &pkgtemp_group);
+	device_remove_file(&pdev->dev, &sensor_dev_attr_temp1_max.dev_attr);
	platform_set_drvdata(pdev, NULL);
	kfree(data);
	return 0;
@@ -281,9 +283,10 @@ static int __cpuinit pkgtemp_device_add(unsigned int cpu)
	int err;
	struct platform_device *pdev;
	struct pdev_entry *pdev_entry;
-#ifdef CONFIG_SMP
	struct cpuinfo_x86 *c = &cpu_data(cpu);
-#endif
+
+	if (!cpu_has(c, X86_FEATURE_PTS))
+		return 0;
 
	mutex_lock(&pdev_list_mutex);
 
@@ -339,17 +342,18 @@ exit:
 #ifdef CONFIG_HOTPLUG_CPU
 static void pkgtemp_device_remove(unsigned int cpu)
 {
-	struct pdev_entry *p, *n;
+	struct pdev_entry *p;
	unsigned int i;
	int err;
 
	mutex_lock(&pdev_list_mutex);
-	list_for_each_entry_safe(p, n, &pdev_list, list) {
+	list_for_each_entry(p, &pdev_list, list) {
		if (p->cpu != cpu)
			continue;
 
		platform_device_unregister(p->pdev);
		list_del(&p->list);
+		mutex_unlock(&pdev_list_mutex);
		kfree(p);
		for_each_cpu(i, cpu_core_mask(cpu)) {
			if (i != cpu) {
@@ -358,7 +362,7 @@ static void pkgtemp_device_remove(unsigned int cpu)
				break;
			}
		}
-		break;
+		return;
	}
	mutex_unlock(&pdev_list_mutex);
 }
@@ -399,11 +403,6 @@ static int __init pkgtemp_init(void)
		goto exit;
 
	for_each_online_cpu(i) {
-		struct cpuinfo_x86 *c = &cpu_data(i);
-
-		if (!cpu_has(c, X86_FEATURE_PTS))
-			continue;
-
		err = pkgtemp_device_add(i);
		if (err)
			goto exit_devices_unreg;

@@ -80,5 +80,4 @@ struct st_proto_s {
 extern long st_register(struct st_proto_s *);
 extern long st_unregister(enum proto_type);
 
-extern struct platform_device *st_get_plat_device(void);
 #endif /* ST_H */

@@ -38,7 +38,6 @@
 #include "st_ll.h"
 #include "st.h"
 
-#define VERBOSE
 /* strings to be used for rfkill entries and by
 * ST Core to be used for sysfs debug entry
 */
@@ -581,7 +580,7 @@ long st_register(struct st_proto_s *new_proto)
	long err = 0;
	unsigned long flags = 0;
 
-	st_kim_ref(&st_gdata);
+	st_kim_ref(&st_gdata, 0);
	pr_info("%s(%d) ", __func__, new_proto->type);
	if (st_gdata == NULL || new_proto == NULL || new_proto->recv == NULL
	    || new_proto->reg_complete_cb == NULL) {
@@ -713,7 +712,7 @@ long st_unregister(enum proto_type type)
 
	pr_debug("%s: %d ", __func__, type);
 
-	st_kim_ref(&st_gdata);
+	st_kim_ref(&st_gdata, 0);
	if (type < ST_BT || type >= ST_MAX) {
		pr_err(" protocol %d not supported", type);
		return -EPROTONOSUPPORT;
@@ -767,7 +766,7 @@ long st_write(struct sk_buff *skb)
 #endif
	long len;
 
-	st_kim_ref(&st_gdata);
+	st_kim_ref(&st_gdata, 0);
	if (unlikely(skb == NULL || st_gdata == NULL
		|| st_gdata->tty == NULL)) {
		pr_err("data/tty unavailable to perform write");
@@ -818,7 +817,7 @@ static int st_tty_open(struct tty_struct *tty)
	struct st_data_s *st_gdata;
	pr_info("%s ", __func__);
 
-	st_kim_ref(&st_gdata);
+	st_kim_ref(&st_gdata, 0);
	st_gdata->tty = tty;
	tty->disc_data = st_gdata;
 

@@ -117,7 +117,7 @@ int st_core_init(struct st_data_s **);
 void st_core_exit(struct st_data_s *);
 
 /* ask for reference from KIM */
-void st_kim_ref(struct st_data_s **);
+void st_kim_ref(struct st_data_s **, int);
 
 #define GPS_STUB_TEST
 #ifdef GPS_STUB_TEST

@@ -72,10 +72,25 @@ const unsigned char *protocol_names[] = {
	PROTO_ENTRY(ST_GPS, "GPS"),
 };
 
+#define MAX_ST_DEVICES	3	/* Imagine 1 on each UART for now */
+struct platform_device *st_kim_devices[MAX_ST_DEVICES];
+
 /**********************************************************************/
 /* internal functions */
 
+/**
+ * st_get_plat_device -
+ *	function which returns the reference to the platform device
+ *	requested by id. As of now only 1 such device exists (id=0)
+ *	the context requesting for reference can get the id to be
+ *	requested by a. The protocol driver which is registering or
+ *	b. the tty device which is opened.
+ */
+static struct platform_device *st_get_plat_device(int id)
+{
+	return st_kim_devices[id];
+}
+
 /**
  * validate_firmware_response -
  *	function to return whether the firmware response was proper
@@ -353,7 +368,7 @@ void st_kim_chip_toggle(enum proto_type type, enum kim_gpio_state state)
	struct kim_data_s	*kim_gdata;
	pr_info(" %s ", __func__);
 
-	kim_pdev = st_get_plat_device();
+	kim_pdev = st_get_plat_device(0);
	kim_gdata = dev_get_drvdata(&kim_pdev->dev);
 
	if (kim_gdata->gpios[type] == -1) {
@@ -574,12 +589,12 @@ static int kim_toggle_radio(void *data, bool blocked)
 * This would enable multiple such platform devices to exist
 * on a given platform
 */
-void st_kim_ref(struct st_data_s **core_data)
+void st_kim_ref(struct st_data_s **core_data, int id)
 {
	struct platform_device	*pdev;
	struct kim_data_s	*kim_gdata;
	/* get kim_gdata reference from platform device */
-	pdev = st_get_plat_device();
+	pdev = st_get_plat_device(id);
	kim_gdata = dev_get_drvdata(&pdev->dev);
	*core_data = kim_gdata->core_data;
 }
@@ -623,6 +638,7 @@ static int kim_probe(struct platform_device *pdev)
	long *gpios = pdev->dev.platform_data;
	struct kim_data_s	*kim_gdata;
 
+	st_kim_devices[pdev->id] = pdev;
	kim_gdata = kzalloc(sizeof(struct kim_data_s), GFP_ATOMIC);
	if (!kim_gdata) {
		pr_err("no mem to allocate");

@@ -91,12 +91,12 @@ config USB_DYNAMIC_MINORS
	  If you are unsure about this, say N here.
 
 config USB_SUSPEND
-	bool "USB runtime power management (suspend/resume and wakeup)"
+	bool "USB runtime power management (autosuspend) and wakeup"
	depends on USB && PM_RUNTIME
	help
	  If you say Y here, you can use driver calls or the sysfs
-	  "power/level" file to suspend or resume individual USB
-	  peripherals and to enable or disable autosuspend (see
+	  "power/control" file to enable or disable autosuspend for
+	  individual USB peripherals (see
	  Documentation/usb/power-management.txt for more details).
 
	  Also, USB "remote wakeup" signaling is supported, whereby some

@@ -159,9 +159,9 @@ void usb_major_cleanup(void)
int usb_register_dev(struct usb_interface *intf,
struct usb_class_driver *class_driver)
{
int retval = -EINVAL;
int retval;
int minor_base = class_driver->minor_base;
int minor = 0;
int minor;
char name[20];
char *temp;

@@ -173,12 +173,17 @@ int usb_register_dev(struct usb_interface *intf,
*/
minor_base = 0;
#endif
intf->minor = -1;

dbg ("looking for a minor, starting at %d", minor_base);

if (class_driver->fops == NULL)
goto exit;
return -EINVAL;
if (intf->minor >= 0)
return -EADDRINUSE;

retval = init_usb_class();
if (retval)
return retval;

dev_dbg(&intf->dev, "looking for a minor, starting at %d", minor_base);

down_write(&minor_rwsem);
for (minor = minor_base; minor < MAX_USB_MINORS; ++minor) {

@@ -186,20 +191,12 @@ int usb_register_dev(struct usb_interface *intf,
continue;

usb_minors[minor] = class_driver->fops;

retval = 0;
intf->minor = minor;
break;
}
up_write(&minor_rwsem);

if (retval)
goto exit;

retval = init_usb_class();
if (retval)
goto exit;

intf->minor = minor;
if (intf->minor < 0)
return -EXFULL;

/* create a usb class device for this usb interface */
snprintf(name, sizeof(name), class_driver->name, minor - minor_base);

@@ -213,11 +210,11 @@ int usb_register_dev(struct usb_interface *intf,
"%s", temp);
if (IS_ERR(intf->usb_dev)) {
down_write(&minor_rwsem);
usb_minors[intf->minor] = NULL;
usb_minors[minor] = NULL;
intf->minor = -1;
up_write(&minor_rwsem);
retval = PTR_ERR(intf->usb_dev);
}
exit:
return retval;
}
EXPORT_SYMBOL_GPL(usb_register_dev);
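The usb_register_dev() rework above replaces the single exit label with early returns: reject a missing fops or an already-bound interface before doing any work, register the USB class up front, and only then scan the minor table. A compact userspace sketch of that fail-fast shape, with invented names (register_dev, minors[]) rather than the USB core's API:

/* Fail-fast registration sketch. EXFULL is Linux-specific; the table and
 * function are illustrative, not the USB core's real implementation. */
#include <stdio.h>
#include <errno.h>

#define MAX_MINORS 16

static void *minors[MAX_MINORS];

static int register_dev(void *fops, int minor_base, int *out_minor)
{
	int minor;

	if (fops == NULL)
		return -EINVAL;           /* fail fast: nothing to undo yet */

	for (minor = minor_base; minor < MAX_MINORS; ++minor) {
		if (minors[minor])
			continue;
		minors[minor] = fops;     /* claim the first free slot */
		*out_minor = minor;
		return 0;
	}
	return -EXFULL;                   /* table full */
}

int main(void)
{
	int minor, dummy;

	if (register_dev(&dummy, 0, &minor) == 0)
		printf("registered at minor %d\n", minor);
	return 0;
}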
@@ -1802,6 +1802,7 @@ free_interfaces:
intf->dev.groups = usb_interface_groups;
intf->dev.dma_mask = dev->dev.dma_mask;
INIT_WORK(&intf->reset_ws, __usb_queue_reset_device);
intf->minor = -1;
device_initialize(&intf->dev);
dev_set_name(&intf->dev, "%d-%s:%d.%d",
dev->bus->busnum, dev->devpath,
@@ -322,6 +322,7 @@ cppi_channel_allocate(struct dma_controller *c,
index, transmit ? 'T' : 'R', cppi_ch);
cppi_ch->hw_ep = ep;
cppi_ch->channel.status = MUSB_DMA_STATUS_FREE;
cppi_ch->channel.max_len = 0x7fffffff;

DBG(4, "Allocate CPPI%d %cX\n", index, transmit ? 'T' : 'R');
return &cppi_ch->channel;
@@ -300,6 +300,11 @@ static void txstate(struct musb *musb, struct musb_request *req)
#ifndef CONFIG_MUSB_PIO_ONLY
if (is_dma_capable() && musb_ep->dma) {
struct dma_controller *c = musb->dma_controller;
size_t request_size;

/* setup DMA, then program endpoint CSR */
request_size = min_t(size_t, request->length - request->actual,
musb_ep->dma->max_len);

use_dma = (request->dma != DMA_ADDR_INVALID);

@@ -307,11 +312,6 @@ static void txstate(struct musb *musb, struct musb_request *req)

#ifdef CONFIG_USB_INVENTRA_DMA
{
size_t request_size;

/* setup DMA, then program endpoint CSR */
request_size = min_t(size_t, request->length,
musb_ep->dma->max_len);
if (request_size < musb_ep->packet_sz)
musb_ep->dma->desired_mode = 0;
else

@@ -373,8 +373,8 @@ static void txstate(struct musb *musb, struct musb_request *req)
use_dma = use_dma && c->channel_program(
musb_ep->dma, musb_ep->packet_sz,
0,
request->dma,
request->length);
request->dma + request->actual,
request_size);
if (!use_dma) {
c->channel_release(musb_ep->dma);
musb_ep->dma = NULL;

@@ -386,8 +386,8 @@ static void txstate(struct musb *musb, struct musb_request *req)
use_dma = use_dma && c->channel_program(
musb_ep->dma, musb_ep->packet_sz,
request->zero,
request->dma,
request->length);
request->dma + request->actual,
request_size);
#endif
}
#endif
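Both txstate() hunks recompute the DMA segment from request->length - request->actual and program the channel at request->dma + request->actual, so every segment after the first starts where the previous one ended. A small standalone illustration of the sizing loop (min_sz() stands in for the kernel's min_t()):

/* Per-segment DMA sizing: always measure what is LEFT of the request. */
#include <stdio.h>
#include <stddef.h>

static size_t min_sz(size_t a, size_t b) { return a < b ? a : b; }

int main(void)
{
	size_t length = 100000;   /* total request */
	size_t actual = 0;        /* bytes already transferred */
	size_t max_len = 65536;   /* channel limit, cf. cppi max_len above */

	while (actual < length) {
		/* the old code used min(length, max_len): wrong after segment 1 */
		size_t seg = min_sz(length - actual, max_len);
		printf("program DMA at offset %zu for %zu bytes\n", actual, seg);
		actual += seg;
	}
	return 0;
}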
@@ -501,26 +501,14 @@ void musb_g_tx(struct musb *musb, u8 epnum)
request->zero = 0;
}

/* ... or if not, then complete it. */
musb_g_giveback(musb_ep, request, 0);

/*
* Kickstart next transfer if appropriate;
* the packet that just completed might not
* be transmitted for hours or days.
* REVISIT for double buffering...
* FIXME revisit for stalls too...
*/
musb_ep_select(mbase, epnum);
csr = musb_readw(epio, MUSB_TXCSR);
if (csr & MUSB_TXCSR_FIFONOTEMPTY)
return;

request = musb_ep->desc ? next_request(musb_ep) : NULL;
if (!request) {
DBG(4, "%s idle now\n",
musb_ep->end_point.name);
return;
if (request->actual == request->length) {
musb_g_giveback(musb_ep, request, 0);
request = musb_ep->desc ? next_request(musb_ep) : NULL;
if (!request) {
DBG(4, "%s idle now\n",
musb_ep->end_point.name);
return;
}
}
}

@@ -568,11 +556,19 @@ static void rxstate(struct musb *musb, struct musb_request *req)
{
const u8 epnum = req->epnum;
struct usb_request *request = &req->request;
struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_out;
struct musb_ep *musb_ep;
void __iomem *epio = musb->endpoints[epnum].regs;
unsigned fifo_count = 0;
u16 len = musb_ep->packet_sz;
u16 len;
u16 csr = musb_readw(epio, MUSB_RXCSR);
struct musb_hw_ep *hw_ep = &musb->endpoints[epnum];

if (hw_ep->is_shared_fifo)
musb_ep = &hw_ep->ep_in;
else
musb_ep = &hw_ep->ep_out;

len = musb_ep->packet_sz;

/* We shouldn't get here while DMA is active, but we do... */
if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {

@@ -647,8 +643,8 @@ static void rxstate(struct musb *musb, struct musb_request *req)
*/

csr |= MUSB_RXCSR_DMAENAB;
#ifdef USE_MODE1
csr |= MUSB_RXCSR_AUTOCLEAR;
#ifdef USE_MODE1
/* csr |= MUSB_RXCSR_DMAMODE; */

/* this special sequence (enabling and then

@@ -663,10 +659,11 @@ static void rxstate(struct musb *musb, struct musb_request *req)
if (request->actual < request->length) {
int transfer_size = 0;
#ifdef USE_MODE1
transfer_size = min(request->length,
transfer_size = min(request->length - request->actual,
channel->max_len);
#else
transfer_size = len;
transfer_size = min(request->length - request->actual,
(unsigned)len);
#endif
if (transfer_size <= musb_ep->packet_sz)
musb_ep->dma->desired_mode = 0;

@@ -740,9 +737,15 @@ void musb_g_rx(struct musb *musb, u8 epnum)
u16 csr;
struct usb_request *request;
void __iomem *mbase = musb->mregs;
struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_out;
struct musb_ep *musb_ep;
void __iomem *epio = musb->endpoints[epnum].regs;
struct dma_channel *dma;
struct musb_hw_ep *hw_ep = &musb->endpoints[epnum];

if (hw_ep->is_shared_fifo)
musb_ep = &hw_ep->ep_in;
else
musb_ep = &hw_ep->ep_out;

musb_ep_select(mbase, epnum);

@@ -1081,7 +1084,7 @@ struct free_record {
/*
* Context: controller locked, IRQs blocked.
*/
static void musb_ep_restart(struct musb *musb, struct musb_request *req)
void musb_ep_restart(struct musb *musb, struct musb_request *req)
{
DBG(3, "<== %s request %p len %u on hw_ep%d\n",
req->tx ? "TX/IN" : "RX/OUT",
@@ -105,4 +105,6 @@ extern void musb_gadget_cleanup(struct musb *);

extern void musb_g_giveback(struct musb_ep *, struct usb_request *, int);

extern void musb_ep_restart(struct musb *, struct musb_request *);

#endif /* __MUSB_GADGET_H */
@@ -261,6 +261,7 @@ __acquires(musb->lock)
ctrlrequest->wIndex & 0x0f;
struct musb_ep *musb_ep;
struct musb_hw_ep *ep;
struct musb_request *request;
void __iomem *regs;
int is_in;
u16 csr;

@@ -302,6 +303,14 @@ __acquires(musb->lock)
musb_writew(regs, MUSB_RXCSR, csr);
}

/* Maybe start the first request in the queue */
request = to_musb_request(
next_request(musb_ep));
if (!musb_ep->busy && request) {
DBG(3, "restarting the request\n");
musb_ep_restart(musb, request);
}

/* select ep0 again */
musb_ep_select(mbase, 0);
} break;
@@ -660,6 +660,12 @@ static bool musb_tx_dma_program(struct dma_controller *dma,

qh->segsize = length;

/*
* Ensure the data reaches to main memory before starting
* DMA transfer
*/
wmb();

if (!dma->channel_program(channel, pkt_size, mode,
urb->transfer_dma + offset, length)) {
dma->channel_release(channel);
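The musb_tx_dma_program() hunk inserts wmb() so the buffer contents are globally visible before the DMA channel is programmed. A userspace analogue of the publish-then-signal ordering, using C11 fences and a flag in place of the DMA doorbell:

/* Publish data, fence, then ring the "doorbell". Single-threaded demo of
 * the ordering contract; names are illustrative. */
#include <stdatomic.h>
#include <stdio.h>

static int payload;
static atomic_int doorbell;

static void producer(void)
{
	payload = 42;                                   /* fill the buffer */
	atomic_thread_fence(memory_order_release);      /* plays the role of wmb() */
	atomic_store_explicit(&doorbell, 1, memory_order_relaxed);  /* start "DMA" */
}

static int consumer(void)
{
	while (!atomic_load_explicit(&doorbell, memory_order_relaxed))
		;
	atomic_thread_fence(memory_order_acquire);      /* pairs with the release */
	return payload;                                 /* guaranteed to see 42 */
}

int main(void)
{
	producer();
	printf("%d\n", consumer());
	return 0;
}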
@@ -209,7 +209,10 @@ static int ocfs2_acl_set_mode(struct inode *inode, struct buffer_head *di_bh,
}

inode->i_mode = new_mode;
inode->i_ctime = CURRENT_TIME;
di->i_mode = cpu_to_le16(inode->i_mode);
di->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
di->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);

ocfs2_journal_dirty(handle, di_bh);
@@ -977,7 +977,7 @@ static int o2net_tx_can_proceed(struct o2net_node *nn,
int o2net_send_message_vec(u32 msg_type, u32 key, struct kvec *caller_vec,
size_t caller_veclen, u8 target_node, int *status)
{
int ret;
int ret = 0;
struct o2net_msg *msg = NULL;
size_t veclen, caller_bytes = 0;
struct kvec *vec = NULL;
@@ -3931,6 +3931,15 @@ static int ocfs2_dx_dir_rebalance(struct ocfs2_super *osb, struct inode *dir,
goto out_commit;
}

cpos = split_hash;
ret = ocfs2_dx_dir_new_cluster(dir, &et, cpos, handle,
data_ac, meta_ac, new_dx_leaves,
num_dx_leaves);
if (ret) {
mlog_errno(ret);
goto out_commit;
}

for (i = 0; i < num_dx_leaves; i++) {
ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir),
orig_dx_leaves[i],

@@ -3939,15 +3948,14 @@ static int ocfs2_dx_dir_rebalance(struct ocfs2_super *osb, struct inode *dir,
mlog_errno(ret);
goto out_commit;
}
}

cpos = split_hash;
ret = ocfs2_dx_dir_new_cluster(dir, &et, cpos, handle,
data_ac, meta_ac, new_dx_leaves,
num_dx_leaves);
if (ret) {
mlog_errno(ret);
goto out_commit;
ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir),
new_dx_leaves[i],
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
goto out_commit;
}
}

ocfs2_dx_dir_transfer_leaf(dir, split_hash, handle, tmp_dx_leaf,
@@ -1030,6 +1030,7 @@ int dlm_drop_lockres_ref(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res);
void dlm_clean_master_list(struct dlm_ctxt *dlm,
u8 dead_node);
void dlm_force_free_mles(struct dlm_ctxt *dlm);
int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock);
int __dlm_lockres_has_locks(struct dlm_lock_resource *res);
int __dlm_lockres_unused(struct dlm_lock_resource *res);
@@ -636,8 +636,14 @@ static void *lockres_seq_start(struct seq_file *m, loff_t *pos)
spin_lock(&dlm->track_lock);
if (oldres)
track_list = &oldres->tracking;
else
else {
track_list = &dlm->tracking_list;
if (list_empty(track_list)) {
dl = NULL;
spin_unlock(&dlm->track_lock);
goto bail;
}
}

list_for_each_entry(res, track_list, tracking) {
if (&res->tracking == &dlm->tracking_list)

@@ -660,6 +666,7 @@ static void *lockres_seq_start(struct seq_file *m, loff_t *pos)
} else
dl = NULL;

bail:
/* passed to seq_show */
return dl;
}
@@ -693,6 +693,7 @@ void dlm_unregister_domain(struct dlm_ctxt *dlm)

dlm_mark_domain_leaving(dlm);
dlm_leave_domain(dlm);
dlm_force_free_mles(dlm);
dlm_complete_dlm_shutdown(dlm);
}
dlm_put(dlm);
@@ -3433,3 +3433,43 @@ void dlm_lockres_release_ast(struct dlm_ctxt *dlm,
wake_up(&res->wq);
wake_up(&dlm->migration_wq);
}

void dlm_force_free_mles(struct dlm_ctxt *dlm)
{
int i;
struct hlist_head *bucket;
struct dlm_master_list_entry *mle;
struct hlist_node *tmp, *list;

/*
* We notified all other nodes that we are exiting the domain and
* marked the dlm state to DLM_CTXT_LEAVING. If any mles are still
* around we force free them and wake any processes that are waiting
* on the mles
*/
spin_lock(&dlm->spinlock);
spin_lock(&dlm->master_lock);

BUG_ON(dlm->dlm_state != DLM_CTXT_LEAVING);
BUG_ON((find_next_bit(dlm->domain_map, O2NM_MAX_NODES, 0) < O2NM_MAX_NODES));

for (i = 0; i < DLM_HASH_BUCKETS; i++) {
bucket = dlm_master_hash(dlm, i);
hlist_for_each_safe(list, tmp, bucket) {
mle = hlist_entry(list, struct dlm_master_list_entry,
master_hash_node);
if (mle->type != DLM_MLE_BLOCK) {
mlog(ML_ERROR, "bad mle: %p\n", mle);
dlm_print_one_mle(mle);
}
atomic_set(&mle->woken, 1);
wake_up(&mle->wq);

__dlm_unlink_mle(dlm, mle);
__dlm_mle_detach_hb_events(dlm, mle);
__dlm_put_mle(mle);
}
}
spin_unlock(&dlm->master_lock);
spin_unlock(&dlm->spinlock);
}
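dlm_force_free_mles() iterates with hlist_for_each_safe() precisely because it unlinks and frees entries mid-walk. The same idiom in miniature, on a plain singly linked list (the kernel's hlist machinery is not reproduced here):

/* Safe-iteration idiom: stash ->next before the current node goes away. */
#include <stdio.h>
#include <stdlib.h>

struct node { int val; struct node *next; };

int main(void)
{
	struct node *head = NULL, *n, *tmp;

	for (int i = 0; i < 3; i++) {        /* build 2 -> 1 -> 0 */
		n = malloc(sizeof(*n));
		n->val = i;
		n->next = head;
		head = n;
	}

	/* "safe" variant: grab ->next before freeing the current node */
	for (n = head; n != NULL; n = tmp) {
		tmp = n->next;
		printf("freeing %d\n", n->val);
		free(n);
	}
	return 0;
}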
@@ -84,6 +84,7 @@ enum {
OI_LS_PARENT,
OI_LS_RENAME1,
OI_LS_RENAME2,
OI_LS_REFLINK_TARGET,
};

int ocfs2_dlm_init(struct ocfs2_super *osb);
@@ -235,18 +235,31 @@
#define OCFS2_HAS_REFCOUNT_FL (0x0010)

/* Inode attributes, keep in sync with EXT2 */
#define OCFS2_SECRM_FL (0x00000001) /* Secure deletion */
#define OCFS2_UNRM_FL (0x00000002) /* Undelete */
#define OCFS2_COMPR_FL (0x00000004) /* Compress file */
#define OCFS2_SYNC_FL (0x00000008) /* Synchronous updates */
#define OCFS2_IMMUTABLE_FL (0x00000010) /* Immutable file */
#define OCFS2_APPEND_FL (0x00000020) /* writes to file may only append */
#define OCFS2_NODUMP_FL (0x00000040) /* do not dump file */
#define OCFS2_NOATIME_FL (0x00000080) /* do not update atime */
#define OCFS2_DIRSYNC_FL (0x00010000) /* dirsync behaviour (directories only) */
#define OCFS2_SECRM_FL FS_SECRM_FL /* Secure deletion */
#define OCFS2_UNRM_FL FS_UNRM_FL /* Undelete */
#define OCFS2_COMPR_FL FS_COMPR_FL /* Compress file */
#define OCFS2_SYNC_FL FS_SYNC_FL /* Synchronous updates */
#define OCFS2_IMMUTABLE_FL FS_IMMUTABLE_FL /* Immutable file */
#define OCFS2_APPEND_FL FS_APPEND_FL /* writes to file may only append */
#define OCFS2_NODUMP_FL FS_NODUMP_FL /* do not dump file */
#define OCFS2_NOATIME_FL FS_NOATIME_FL /* do not update atime */
/* Reserved for compression usage... */
#define OCFS2_DIRTY_FL FS_DIRTY_FL
#define OCFS2_COMPRBLK_FL FS_COMPRBLK_FL /* One or more compressed clusters */
#define OCFS2_NOCOMP_FL FS_NOCOMP_FL /* Don't compress */
#define OCFS2_ECOMPR_FL FS_ECOMPR_FL /* Compression error */
/* End compression flags --- maybe not all used */
#define OCFS2_BTREE_FL FS_BTREE_FL /* btree format dir */
#define OCFS2_INDEX_FL FS_INDEX_FL /* hash-indexed directory */
#define OCFS2_IMAGIC_FL FS_IMAGIC_FL /* AFS directory */
#define OCFS2_JOURNAL_DATA_FL FS_JOURNAL_DATA_FL /* Reserved for ext3 */
#define OCFS2_NOTAIL_FL FS_NOTAIL_FL /* file tail should not be merged */
#define OCFS2_DIRSYNC_FL FS_DIRSYNC_FL /* dirsync behaviour (directories only) */
#define OCFS2_TOPDIR_FL FS_TOPDIR_FL /* Top of directory hierarchies*/
#define OCFS2_RESERVED_FL FS_RESERVED_FL /* reserved for ext2 lib */

#define OCFS2_FL_VISIBLE (0x000100FF) /* User visible flags */
#define OCFS2_FL_MODIFIABLE (0x000100FF) /* User modifiable flags */
#define OCFS2_FL_VISIBLE FS_FL_USER_VISIBLE /* User visible flags */
#define OCFS2_FL_MODIFIABLE FS_FL_USER_MODIFIABLE /* User modifiable flags */

/*
* Extent record flags (e_node.leaf.flags)
@@ -23,10 +23,10 @@
/*
* ioctl commands
*/
#define OCFS2_IOC_GETFLAGS _IOR('f', 1, long)
#define OCFS2_IOC_SETFLAGS _IOW('f', 2, long)
#define OCFS2_IOC32_GETFLAGS _IOR('f', 1, int)
#define OCFS2_IOC32_SETFLAGS _IOW('f', 2, int)
#define OCFS2_IOC_GETFLAGS FS_IOC_GETFLAGS
#define OCFS2_IOC_SETFLAGS FS_IOC_SETFLAGS
#define OCFS2_IOC32_GETFLAGS FS_IOC32_GETFLAGS
#define OCFS2_IOC32_SETFLAGS FS_IOC32_SETFLAGS

/*
* Space reservation / allocation / free ioctls and argument structure
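With this hunk OCFS2 simply aliases the VFS-generic flag ioctls, so one userspace call path covers it along with ext2/ext3 and friends. A minimal Linux example of that shared interface (pass a file path as argv[1]; error handling trimmed):

/* Query inode attribute flags through the generic ioctl. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(int argc, char **argv)
{
	long flags = 0;
	int fd = open(argc > 1 ? argv[1] : ".", O_RDONLY);

	if (fd < 0)
		return 1;
	/* FS_IOC_GETFLAGS is _IOR('f', 1, long): the same bits the old
	 * OCFS2-private definition encoded */
	if (ioctl(fd, FS_IOC_GETFLAGS, &flags) == 0)
		printf("inode flags: 0x%lx\n", flags);
	close(fd);
	return 0;
}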
@@ -4201,8 +4201,9 @@ static int __ocfs2_reflink(struct dentry *old_dentry,
goto out;
}

mutex_lock(&new_inode->i_mutex);
ret = ocfs2_inode_lock(new_inode, &new_bh, 1);
mutex_lock_nested(&new_inode->i_mutex, I_MUTEX_CHILD);
ret = ocfs2_inode_lock_nested(new_inode, &new_bh, 1,
OI_LS_REFLINK_TARGET);
if (ret) {
mlog_errno(ret);
goto out_unlock;
@@ -732,25 +732,23 @@ int ocfs2_resmap_resv_bits(struct ocfs2_reservation_map *resmap,
struct ocfs2_alloc_reservation *resv,
int *cstart, int *clen)
{
unsigned int wanted = *clen;

if (resv == NULL || ocfs2_resmap_disabled(resmap))
return -ENOSPC;

spin_lock(&resv_lock);

/*
* We don't want to over-allocate for temporary
* windows. Otherwise, we run the risk of fragmenting the
* allocation space.
*/
wanted = ocfs2_resv_window_bits(resmap, resv);
if ((resv->r_flags & OCFS2_RESV_FLAG_TMP) || wanted < *clen)
wanted = *clen;

if (ocfs2_resv_empty(resv)) {
mlog(0, "empty reservation, find new window\n");
/*
* We don't want to over-allocate for temporary
* windows. Otherwise, we run the risk of fragmenting the
* allocation space.
*/
unsigned int wanted = ocfs2_resv_window_bits(resmap, resv);

if ((resv->r_flags & OCFS2_RESV_FLAG_TMP) || wanted < *clen)
wanted = *clen;

mlog(0, "empty reservation, find new window\n");
/*
* Try to get a window here. If it works, we must fall
* through and test the bitmap . This avoids some
@@ -357,7 +357,7 @@ out:
static void ocfs2_bg_discontig_add_extent(struct ocfs2_super *osb,
struct ocfs2_group_desc *bg,
struct ocfs2_chain_list *cl,
u64 p_blkno, u32 clusters)
u64 p_blkno, unsigned int clusters)
{
struct ocfs2_extent_list *el = &bg->bg_list;
struct ocfs2_extent_rec *rec;

@@ -369,7 +369,7 @@ static void ocfs2_bg_discontig_add_extent(struct ocfs2_super *osb,
rec->e_blkno = cpu_to_le64(p_blkno);
rec->e_cpos = cpu_to_le32(le16_to_cpu(bg->bg_bits) /
le16_to_cpu(cl->cl_bpc));
rec->e_leaf_clusters = cpu_to_le32(clusters);
rec->e_leaf_clusters = cpu_to_le16(clusters);
le16_add_cpu(&bg->bg_bits, clusters * le16_to_cpu(cl->cl_bpc));
le16_add_cpu(&bg->bg_free_bits_count,
clusters * le16_to_cpu(cl->cl_bpc));
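The suballoc.c fix swaps cpu_to_le32() for cpu_to_le16() because e_leaf_clusters is a 16-bit little-endian disk field: on a big-endian host the 32-bit byte swap puts the payload in the upper bytes, and truncating to 16 bits then loses it. A standalone demonstration, with GCC/Clang byte-swap builtins standing in for cpu_to_leXX() on a big-endian machine:

/* Why a 32-bit swap stored through a 16-bit field loses data on BE hosts. */
#include <stdio.h>
#include <stdint.h>

static uint32_t bswap32(uint32_t v) { return __builtin_bswap32(v); }
static uint16_t bswap16(uint16_t v) { return __builtin_bswap16(v); }

int main(void)
{
	uint32_t clusters = 7;

	/* On big-endian, cpu_to_le32(7) = 0x07000000; a 16-bit field keeps
	 * only the low half: 0x0000 -- the value vanishes. */
	uint16_t bad  = (uint16_t)bswap32(clusters);
	/* cpu_to_le16(7) = 0x0700, which the 16-bit field holds intact. */
	uint16_t good = bswap16((uint16_t)clusters);

	printf("bad: 0x%04X  good: 0x%04X\n", bad, good);
	return 0;
}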
@@ -1286,13 +1286,11 @@ int ocfs2_xattr_get_nolock(struct inode *inode,
xis.inode_bh = xbs.inode_bh = di_bh;
di = (struct ocfs2_dinode *)di_bh->b_data;

down_read(&oi->ip_xattr_sem);
ret = ocfs2_xattr_ibody_get(inode, name_index, name, buffer,
buffer_size, &xis);
if (ret == -ENODATA && di->i_xattr_loc)
ret = ocfs2_xattr_block_get(inode, name_index, name, buffer,
buffer_size, &xbs);
up_read(&oi->ip_xattr_sem);

return ret;
}

@@ -1316,8 +1314,10 @@ static int ocfs2_xattr_get(struct inode *inode,
mlog_errno(ret);
return ret;
}
down_read(&OCFS2_I(inode)->ip_xattr_sem);
ret = ocfs2_xattr_get_nolock(inode, di_bh, name_index,
name, buffer, buffer_size);
up_read(&OCFS2_I(inode)->ip_xattr_sem);

ocfs2_inode_unlock(inode, 0);

@@ -612,7 +612,7 @@ struct drm_gem_object {
struct kref refcount;

/** Handle count of this object. Each handle also holds a reference */
struct kref handlecount;
atomic_t handle_count; /* number of handles on this object */

/** Related drm device */
struct drm_device *dev;

@@ -1151,6 +1151,7 @@ extern int drm_release(struct inode *inode, struct file *filp);
extern int drm_mmap(struct file *filp, struct vm_area_struct *vma);
extern int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma);
extern void drm_vm_open_locked(struct vm_area_struct *vma);
extern void drm_vm_close_locked(struct vm_area_struct *vma);
extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);

/* Memory management support (drm_memory.h) */

@@ -1411,12 +1412,11 @@ int drm_gem_init(struct drm_device *dev);
void drm_gem_destroy(struct drm_device *dev);
void drm_gem_object_release(struct drm_gem_object *obj);
void drm_gem_object_free(struct kref *kref);
void drm_gem_object_free_unlocked(struct kref *kref);
struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev,
size_t size);
int drm_gem_object_init(struct drm_device *dev,
struct drm_gem_object *obj, size_t size);
void drm_gem_object_handle_free(struct kref *kref);
void drm_gem_object_handle_free(struct drm_gem_object *obj);
void drm_gem_vm_open(struct vm_area_struct *vma);
void drm_gem_vm_close(struct vm_area_struct *vma);
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);

@@ -1439,8 +1439,12 @@ drm_gem_object_unreference(struct drm_gem_object *obj)
static inline void
drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
{
if (obj != NULL)
kref_put(&obj->refcount, drm_gem_object_free_unlocked);
if (obj != NULL) {
struct drm_device *dev = obj->dev;
mutex_lock(&dev->struct_mutex);
kref_put(&obj->refcount, drm_gem_object_free);
mutex_unlock(&dev->struct_mutex);
}
}

int drm_gem_handle_create(struct drm_file *file_priv,

@@ -1451,7 +1455,7 @@ static inline void
drm_gem_object_handle_reference(struct drm_gem_object *obj)
{
drm_gem_object_reference(obj);
kref_get(&obj->handlecount);
atomic_inc(&obj->handle_count);
}

static inline void

@@ -1460,12 +1464,15 @@ drm_gem_object_handle_unreference(struct drm_gem_object *obj)
if (obj == NULL)
return;

if (atomic_read(&obj->handle_count) == 0)
return;
/*
* Must bump handle count first as this may be the last
* ref, in which case the object would disappear before we
* checked for a name
*/
kref_put(&obj->handlecount, drm_gem_object_handle_free);
if (atomic_dec_and_test(&obj->handle_count))
drm_gem_object_handle_free(obj);
drm_gem_object_unreference(obj);
}

@@ -1475,12 +1482,17 @@ drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
if (obj == NULL)
return;

if (atomic_read(&obj->handle_count) == 0)
return;

/*
* Must bump handle count first as this may be the last
* ref, in which case the object would disappear before we
* checked for a name
*/
kref_put(&obj->handlecount, drm_gem_object_handle_free);

if (atomic_dec_and_test(&obj->handle_count))
drm_gem_object_handle_free(obj);
drm_gem_object_unreference_unlocked(obj);
}

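The drmP.h hunks retire the kref handlecount in favour of a bare atomic handle_count, dropped through atomic_dec_and_test() so only the final handle triggers the free path, and a zero count is tolerated rather than underflowed. A compact C11 model of that counter pattern; handle_free() here just prints, and all names are illustrative:

/* Atomic handle counting: last decrement runs the free path exactly once. */
#include <stdatomic.h>
#include <stdio.h>

struct gem_object { atomic_int handle_count; };

static void handle_free(struct gem_object *obj)
{
	(void)obj;
	printf("last handle gone, freeing flink name\n");
}

static void handle_reference(struct gem_object *obj)
{
	atomic_fetch_add(&obj->handle_count, 1);
}

static void handle_unreference(struct gem_object *obj)
{
	if (atomic_load(&obj->handle_count) == 0)
		return;                       /* nothing to drop */
	/* fetch_sub returns the old value; old == 1 means we were last,
	 * mirroring the kernel's atomic_dec_and_test() */
	if (atomic_fetch_sub(&obj->handle_count, 1) == 1)
		handle_free(obj);
}

int main(void)
{
	struct gem_object obj = { .handle_count = 0 };

	handle_reference(&obj);
	handle_reference(&obj);
	handle_unreference(&obj);
	handle_unreference(&obj);             /* prints once, on the last drop */
	return 0;
}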
@@ -85,7 +85,6 @@
{0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
{0x1002, 0x5462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
{0x1002, 0x5464, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
{0x1002, 0x5657, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5548, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5549, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
{0x1002, 0x554A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \

@@ -103,6 +102,7 @@
{0x1002, 0x564F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5657, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP}, \
{0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
{0x1002, 0x5954, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
@@ -125,7 +125,6 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
{
struct mm_struct *mm = current->mm;
struct address_space *mapping;
unsigned long end = start + size;
struct vm_area_struct *vma;
int err = -EINVAL;
int has_write_lock = 0;

@@ -142,6 +141,10 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
if (start + size <= start)
return err;

/* Does pgoff wrap? */
if (pgoff + (size >> PAGE_SHIFT) < pgoff)
return err;

/* Can we represent this offset inside this architecture's pte's? */
#if PTE_FILE_MAX_BITS < BITS_PER_LONG
if (pgoff + (size >> PAGE_SHIFT) >= (1UL << PTE_FILE_MAX_BITS))

@@ -168,7 +171,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
if (!(vma->vm_flags & VM_CAN_NONLINEAR))
goto out;

if (end <= start || start < vma->vm_start || end > vma->vm_end)
if (start < vma->vm_start || start + size > vma->vm_end)
goto out;

/* Must set VM_NONLINEAR before any pages are populated. */
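The remap_file_pages() hunk adds a wrap check on pgoff alongside the existing one on start + size. Unsigned arithmetic wraps silently in C, so "a + b < a" is the standard overflow test both checks use; a tiny demonstration:

/* Detecting unsigned wraparound the way the new pgoff check does. */
#include <stdio.h>

int main(void)
{
	unsigned long pgoff = ~0UL - 1;     /* nearly at the top of the range */
	unsigned long pages = 4;

	if (pgoff + pages < pgoff)          /* sum wrapped past zero */
		printf("pgoff wraps: reject with -EINVAL\n");
	else
		printf("offset range is sane\n");
	return 0;
}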
@@ -3641,6 +3641,7 @@ static struct snd_pci_quirk ad1984_cfg_tbl[] = {
/* Lenovo Thinkpad T61/X61 */
SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo Thinkpad", AD1984_THINKPAD),
SND_PCI_QUIRK(0x1028, 0x0214, "Dell T3400", AD1984_DELL_DESKTOP),
SND_PCI_QUIRK(0x1028, 0x0233, "Dell Latitude E6400", AD1984_DELL_DESKTOP),
{}
};

@@ -1594,12 +1594,22 @@ static void alc_auto_parse_digital(struct hda_codec *codec)
}

if (spec->autocfg.dig_in_pin) {
hda_nid_t dig_nid;
err = snd_hda_get_connections(codec,
spec->autocfg.dig_in_pin,
&dig_nid, 1);
if (err > 0)
spec->dig_in_nid = dig_nid;
dig_nid = codec->start_nid;
for (i = 0; i < codec->num_nodes; i++, dig_nid++) {
unsigned int wcaps = get_wcaps(codec, dig_nid);
if (get_wcaps_type(wcaps) != AC_WID_AUD_IN)
continue;
if (!(wcaps & AC_WCAP_DIGITAL))
continue;
if (!(wcaps & AC_WCAP_CONN_LIST))
continue;
err = get_connection_index(codec, dig_nid,
spec->autocfg.dig_in_pin);
if (err >= 0) {
spec->dig_in_nid = dig_nid;
break;
}
}
}
}

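alc_auto_parse_digital() now scans every codec widget and filters on capability bits to find a digital audio input connected to the pin, instead of trusting the pin's first connection. A userspace model of that scan-and-filter loop, with invented capability words in place of the HDA widget caps:

/* Scan-and-filter over a fake widget capability table. */
#include <stdio.h>
#include <stdint.h>

#define CAP_AUD_IN   (1u << 0)
#define CAP_DIGITAL  (1u << 1)
#define CAP_CONNLIST (1u << 2)

int main(void)
{
	uint32_t wcaps[] = {       /* invented per-widget capability words */
		CAP_AUD_IN,
		CAP_AUD_IN | CAP_DIGITAL,
		CAP_AUD_IN | CAP_DIGITAL | CAP_CONNLIST,   /* the one we want */
	};
	int n = sizeof(wcaps) / sizeof(wcaps[0]);

	for (int nid = 0; nid < n; nid++) {
		if (!(wcaps[nid] & CAP_AUD_IN))
			continue;
		if (!(wcaps[nid] & CAP_DIGITAL))
			continue;
		if (!(wcaps[nid] & CAP_CONNLIST))
			continue;
		printf("digital input widget found at nid %d\n", nid);
		break;
	}
	return 0;
}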
@@ -543,6 +543,10 @@ static int __devinit get_oxygen_model(struct oxygen *chip,
chip->model.suspend = claro_suspend;
chip->model.resume = claro_resume;
chip->model.set_adc_params = set_ak5385_params;
chip->model.device_config = PLAYBACK_0_TO_I2S |
PLAYBACK_1_TO_SPDIF |
CAPTURE_0_FROM_I2S_2 |
CAPTURE_1_FROM_SPDIF;
break;
}
if (id->driver_data == MODEL_MERIDIAN ||
@@ -4609,6 +4609,7 @@ static int snd_hdsp_hwdep_ioctl(struct snd_hwdep *hw, struct file *file, unsigne
if (err < 0)
return err;

memset(&info, 0, sizeof(info));
spin_lock_irqsave(&hdsp->lock, flags);
info.pref_sync_ref = (unsigned char)hdsp_pref_sync_ref(hdsp);
info.wordclock_sync_check = (unsigned char)hdsp_wc_sync_check(hdsp);
@@ -4127,6 +4127,7 @@ static int snd_hdspm_hwdep_ioctl(struct snd_hwdep * hw, struct file *file,

case SNDRV_HDSPM_IOCTL_GET_CONFIG_INFO:

memset(&info, 0, sizeof(info));
spin_lock_irq(&hdspm->lock);
info.pref_sync_ref = hdspm_pref_sync_ref(hdspm);
info.wordclock_sync_check = hdspm_wc_sync_check(hdspm);
@@ -12,6 +12,7 @@
#include <linux/firmware.h>
#include <linux/module.h>

#include <asm/clkdev.h>
#include <asm/clock.h>

#include <cpu/sh7722.h>

@@ -40,12 +41,12 @@ static struct clk_ops siumckb_clk_ops = {
};

static struct clk siumckb_clk = {
.name = "siumckb_clk",
.id = -1,
.ops = &siumckb_clk_ops,
.rate = 0, /* initialised at run-time */
};

static struct clk_lookup *siumckb_lookup;

static int migor_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{

@@ -180,6 +181,13 @@ static int __init migor_init(void)
if (ret < 0)
return ret;

siumckb_lookup = clkdev_alloc(&siumckb_clk, "siumckb_clk", NULL);
if (!siumckb_lookup) {
ret = -ENOMEM;
goto eclkdevalloc;
}
clkdev_add(siumckb_lookup);

/* Port number used on this machine: port B */
migor_snd_device = platform_device_alloc("soc-audio", 1);
if (!migor_snd_device) {

@@ -200,12 +208,15 @@ static int __init migor_init(void)
epdevadd:
platform_device_put(migor_snd_device);
epdevalloc:
clkdev_drop(siumckb_lookup);
eclkdevalloc:
clk_unregister(&siumckb_clk);
return ret;
}

static void __exit migor_exit(void)
{
clkdev_drop(siumckb_lookup);
clk_unregister(&siumckb_clk);
platform_device_unregister(migor_snd_device);
}
@@ -203,8 +203,9 @@ static int snd_soc_8_16_write(struct snd_soc_codec *codec, unsigned int reg,
data[1] = (value >> 8) & 0xff;
data[2] = value & 0xff;

if (!snd_soc_codec_volatile_register(codec, reg))
reg_cache[reg] = value;
if (!snd_soc_codec_volatile_register(codec, reg)
&& reg < codec->reg_cache_size)
reg_cache[reg] = value;

if (codec->cache_only) {
codec->cache_sync = 1;
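The soc-cache fix only writes through the register cache when the register index actually fits inside it. A minimal model of the added bounds check (sizes and types are illustrative):

/* Bounds-checked write-through register cache. */
#include <stdio.h>
#include <stdint.h>

#define CACHE_SIZE 8

static uint16_t reg_cache[CACHE_SIZE];

static void cache_write(unsigned int reg, uint16_t value, int is_volatile)
{
	/* volatile registers bypass the cache; out-of-range ones must too */
	if (!is_volatile && reg < CACHE_SIZE)
		reg_cache[reg] = value;
}

int main(void)
{
	cache_write(3, 0xABCD, 0);    /* cached */
	cache_write(12, 0xFFFF, 0);   /* past the cache: silently skipped */
	printf("reg 3 cached as 0x%04X\n", reg_cache[3]);
	return 0;
}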