/*
 * Copyright (C) 2004 IBM Corporation
 * Authors:
 * Leendert van Doorn <leendert@watson.ibm.com>
 * Dave Safford <safford@watson.ibm.com>
 * Reiner Sailer <sailer@watson.ibm.com>
 * Kylene Hall <kjhall@us.ibm.com>
 *
 * Copyright (C) 2013 Obsidian Research Corp
 * Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
 *
 * Device file system interface to the TPM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2 of the
 * License.
 *
 */
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include "tpm.h"

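/*
 * Per-open state for the /dev/tpm<N> character device.  Userspace writes a
 * single, complete TPM command into data_buffer and then reads the response
 * back out; data_pending is non-zero while a response is waiting to be read.
 */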
struct file_priv {
	struct tpm_chip *chip;

	/* Data passed to and from the tpm via the read/write calls */
	atomic_t data_pending;
	struct mutex buffer_mutex;

	struct timer_list user_read_timer;	/* user needs to claim result */
	struct work_struct work;

	u8 data_buffer[TPM_BUFSIZE];
};

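/*
 * Timer callback: the user never claimed the response in time.  Punt to
 * process context to discard it, since the timer runs in atomic context and
 * cannot take buffer_mutex itself.
 */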
static void user_reader_timeout(unsigned long ptr)
{
	struct file_priv *priv = (struct file_priv *)ptr;

	schedule_work(&priv->work);
}

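/* Discard an unclaimed response: reset data_pending and wipe the buffer. */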
static void timeout_work(struct work_struct *work)
{
	struct file_priv *priv = container_of(work, struct file_priv, work);

	mutex_lock(&priv->buffer_mutex);
	atomic_set(&priv->data_pending, 0);
	memset(priv->data_buffer, 0, sizeof(priv->data_buffer));
	mutex_unlock(&priv->buffer_mutex);
}

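/*
 * Open /dev/tpm<N>.  Only one process may hold the device open at a time;
 * the per-open state is allocated here and a reference on the chip's device
 * is held for the lifetime of the file.
 */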
static int tpm_open(struct inode *inode, struct file *file)
{
	struct miscdevice *misc = file->private_data;
	struct tpm_chip *chip = container_of(misc, struct tpm_chip,
					     vendor.miscdev);
	struct file_priv *priv;

	/* The chip may only be opened once at a time; this is enforced by
	 * the is_open bit, which is protected by driver_lock. */
	if (test_and_set_bit(0, &chip->is_open)) {
		dev_dbg(chip->dev, "Another process owns this TPM\n");
		return -EBUSY;
	}

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (priv == NULL) {
		clear_bit(0, &chip->is_open);
		return -ENOMEM;
	}

	priv->chip = chip;
	atomic_set(&priv->data_pending, 0);
	mutex_init(&priv->buffer_mutex);
	setup_timer(&priv->user_read_timer, user_reader_timeout,
		    (unsigned long)priv);
	INIT_WORK(&priv->work, timeout_work);

	file->private_data = priv;
	get_device(chip->dev);
	return 0;
}

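/*
 * Read the response to the last command.  Any pending claim timeout is
 * cancelled first; the response is copied out (truncated to the caller's
 * buffer if necessary) and then cleared so a new command can be written.
 */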
static ssize_t tpm_read(struct file *file, char __user *buf,
			size_t size, loff_t *off)
{
	struct file_priv *priv = file->private_data;
	ssize_t ret_size;
	int rc;

	del_singleshot_timer_sync(&priv->user_read_timer);
	flush_work(&priv->work);
	ret_size = atomic_read(&priv->data_pending);
	if (ret_size > 0) {	/* relay data */
		ssize_t orig_ret_size = ret_size;

		if (size < ret_size)
			ret_size = size;

		mutex_lock(&priv->buffer_mutex);
		rc = copy_to_user(buf, priv->data_buffer, ret_size);
		memset(priv->data_buffer, 0, orig_ret_size);
		if (rc)
			ret_size = -EFAULT;

		mutex_unlock(&priv->buffer_mutex);
	}

	atomic_set(&priv->data_pending, 0);

	return ret_size;
}

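/*
 * Write a complete TPM command to the device.  The command is transmitted
 * synchronously; the response is parked in data_buffer until the caller
 * reads it back or the claim timeout fires.
 */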
static ssize_t tpm_write(struct file *file, const char __user *buf,
			 size_t size, loff_t *off)
{
	struct file_priv *priv = file->private_data;
	size_t in_size = size;
	ssize_t out_size;

	/* Cannot perform a write until the previous response has been
	 * cleared, either by tpm_read or by a user_read_timer timeout.
	 * This also prevents split buffered writes from blocking here.
	 */
	if (atomic_read(&priv->data_pending) != 0)
		return -EBUSY;

	if (in_size > TPM_BUFSIZE)
		return -E2BIG;

	mutex_lock(&priv->buffer_mutex);

	if (copy_from_user(priv->data_buffer, buf, in_size)) {
		mutex_unlock(&priv->buffer_mutex);
		return -EFAULT;
	}

	/* atomic tpm command send and result receive */
	out_size = tpm_transmit(priv->chip, priv->data_buffer,
				sizeof(priv->data_buffer));
	if (out_size < 0) {
		mutex_unlock(&priv->buffer_mutex);
		return out_size;
	}

	atomic_set(&priv->data_pending, out_size);
	mutex_unlock(&priv->buffer_mutex);

	/* Set a timeout by which the reader must come claim the result */
	mod_timer(&priv->user_read_timer, jiffies + (60 * HZ));

	return in_size;
}

/*
 * Called on file close
 */
static int tpm_release(struct inode *inode, struct file *file)
{
	struct file_priv *priv = file->private_data;

	del_singleshot_timer_sync(&priv->user_read_timer);
	flush_work(&priv->work);
	file->private_data = NULL;
	atomic_set(&priv->data_pending, 0);
	clear_bit(0, &priv->chip->is_open);
	put_device(priv->chip->dev);
	kfree(priv);
	return 0;
}

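/* File operations for the /dev/tpm<N> character device. */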
static const struct file_operations tpm_fops = {
	.owner = THIS_MODULE,
	.llseek = no_llseek,
	.open = tpm_open,
	.read = tpm_read,
	.write = tpm_write,
	.release = tpm_release,
};

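/*
 * Register the /dev/tpm<N> misc device for a chip.  The first chip keeps the
 * legacy fixed TPM_MINOR; later chips get dynamically assigned minors.
 */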
int tpm_dev_add_device(struct tpm_chip *chip)
{
	int rc;

	chip->vendor.miscdev.fops = &tpm_fops;
	if (chip->dev_num == 0)
		chip->vendor.miscdev.minor = TPM_MINOR;
	else
		chip->vendor.miscdev.minor = MISC_DYNAMIC_MINOR;

	chip->vendor.miscdev.name = chip->devname;
	chip->vendor.miscdev.parent = chip->dev;

	rc = misc_register(&chip->vendor.miscdev);
	if (rc) {
		dev_err(chip->dev,
			"unable to misc_register %s, minor %d err=%d\n",
			chip->vendor.miscdev.name,
			chip->vendor.miscdev.minor, rc);
		chip->vendor.miscdev.name = NULL;
	}
	return rc;
}

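/*
 * Unregister the misc device.  A NULL name means registration never
 * succeeded, so there is nothing to deregister.
 */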
void tpm_dev_del_device(struct tpm_chip *chip)
{
	if (chip->vendor.miscdev.name)
		misc_deregister(&chip->vendor.miscdev);
}