系列文章:
Linux spi驅動架構分析(一)
Linux spi驅動架構分析(二)
Linux spi驅動架構分析(三)
Linux spi驅動架構分析(四)
spi_master的消息隊列機制
SPI資料傳輸可以有兩種方式:同步方式和異步方式。所謂同步方式是指資料傳輸的發起者必須等待本次傳輸的結束,期間不能做其它事情,用代碼來解釋就是,調用傳輸的函數後,直到資料傳輸完成,函數才會傳回。而異步方式則正好相反,資料傳輸的發起者無需等待傳輸的結束,資料傳輸期間還可以做其它事情,用代碼來解釋就是,調用傳輸的函數後,函數會立刻傳回而不用等待資料傳輸完成,我們隻需設定一個回調函數,傳輸完成後,該回調函數會被調用以通知發起者資料傳送已經完成。同步方式簡單易用,很适合處理那些少量資料的單次傳輸。但是對于資料量大、次數多的傳輸來說,異步方式就顯得更加合适。
對于SPI控制器來說,要支援異步方式必須要考慮以下兩種狀況:
- 對于同一個資料傳輸的發起者,既然異步方式無需等待資料傳輸完成即可傳回,傳回後,該發起者可以立刻又發起一個message,而這時上一個message還沒有處理完。
- 對于另外一個不同的發起者來說,也有可能同時發起一次message傳輸請求。
隊列化正是為了解決以上的問題,所謂隊列化,是指把等待傳輸的message放入一個隊列中,發起一個傳輸操作,其實就是把對應的message按先後順序放入一個隊列中。核心會建立一個核心工作線程,通過線程來處理隊列上的message。
一個或者多個裝置驅動程式可以同時向控制器驅動發起多個spi_message請求,這些spi_message也是以連結清單的形式被連結在spi_master結構體的queue成員裡。
spi_master,spi_message,spi_transfer這幾個資料結構的關系可以用下圖來描述:
如果spi控制器驅動要想支援消息隊列機制的話,注冊spi_master時,其transfer成員不能設定,具體細節如下代碼所示:
/*
 * Register an SPI master controller (kernel excerpt; "......" marks elided code).
 * If the driver did not install its own ->transfer hook, the core switches the
 * controller to the queued (message-queue) transfer path.
 */
int spi_register_master(struct spi_master *master)
{
static atomic_t dyn_bus_id = ATOMIC_INIT((1<<15) - 1);
struct device *dev = master->dev.parent;
struct boardinfo *bi;
int status = -ENODEV;
int dynamic = 0;
......
if (master->transfer)
dev_info(dev, "master is unqueued, this is deprecated\n");
else {
/* Initialize the message-queue machinery: create the kernel worker thread etc. */
status = spi_master_initialize_queue(master);
if (status) {
device_del(&master->dev);
goto done;
}
}
......
}
spi_master_initialize_queue函數執行流程圖如下所示:
進入spi_master_initialize_queue函數:
/*
 * Wire up the queued transfer path for this controller: install the queued
 * transfer hooks, create the worker thread, and start the queue.
 */
static int spi_master_initialize_queue(struct spi_master *master)
{
int ret;
// Route all transfer requests through the queued path
master->transfer = spi_queued_transfer;
// Fall back to the generic per-message handler if the driver provides none
if (!master->transfer_one_message)
master->transfer_one_message = spi_transfer_one_message;
/* Create the worker thread and initialize the queue state */
ret = spi_init_queue(master);
if (ret) {
dev_err(&master->dev, "problem initializing queue\n");
goto err_init_queue;
}
master->queued = true;
// Start processing messages
ret = spi_start_queue(master);
if (ret) {
dev_err(&master->dev, "problem starting queue\n");
goto err_start_queue;
}
return 0;
err_start_queue:
spi_destroy_queue(master);
err_init_queue:
return ret;
}
在分析spi_init_queue函數之前,先介紹下什麼是工作線程。要完成工作的話,得有勞工和工作。kthread_worker代表勞工,而kthread_work代表工作,定義于include/linux/kthread.h。
/* The "worker": owns a list of pending work items and the thread that runs them. */
struct kthread_worker {
unsigned int flags;
spinlock_t lock;
// Pending kthread_work items are linked here — conceptually the conveyor belt
struct list_head work_list;
struct list_head delayed_work_list;
// task_struct of the kernel thread that executes work for this worker
struct task_struct *task;
// The kthread_work currently being processed
struct kthread_work *current_work;
};
/* One unit of "work": a function to run, queued on a kthread_worker. */
struct kthread_work {
struct list_head node;
// The function this work item executes
kthread_work_func_t func;
// The kthread_worker this work item is (or was last) queued on
struct kthread_worker *worker;
/* Number of canceling calls that are running at the moment. */
int canceling;
};
工作線程即是建立一個核心線程,該核心線程執行kthread_worker_fn函數,在該函數中會從kthread_worker的work_list連結清單裡,取出每個kthread_work然後執行kthread_work裡的func函數。
spi_init_queue函數主要做建立線程,初始化kthread_worker等工作。
/*
 * Create the controller's worker thread and initialize its kthread_worker
 * and pump_messages work item (excerpt; "......" marks elided code).
 */
static int spi_init_queue(struct spi_master *master)
{
struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
master->running = false;
master->busy = false;
// Initialize the master's kthread_worker
kthread_init_worker(&master->kworker);
// Spawn a kernel thread whose body is kthread_worker_fn
master->kworker_task = kthread_run(kthread_worker_fn,
&master->kworker, "%s",
dev_name(&master->dev));
......
// Initialize the master's kthread_work; its handler is spi_pump_messages
kthread_init_work(&master->pump_messages, spi_pump_messages);
......
}
初始化完後,調用spi_start_queue函數開始工作:
/* Mark the queue running and kick the worker (excerpt). */
static int spi_start_queue(struct spi_master *master)
{
unsigned long flags;
......
// Put master->pump_messages on master->kworker's work_list and wake the worker thread
kthread_queue_work(&master->kworker, &master->pump_messages);
return 0;
}
勞工和工作都有了,準備就緒,看看kthread_worker_fn函數:
/*
 * Worker-thread main loop: repeatedly pop the first kthread_work off the
 * worker's work_list and run its func; sleep when the list is empty.
 */
int kthread_worker_fn(void *worker_ptr)
{
struct kthread_worker *worker = worker_ptr;
struct kthread_work *work;
......
repeat:
......
work = NULL;
spin_lock_irq(&worker->lock);
// Is there pending work?
if (!list_empty(&worker->work_list)) {
// Yes: dequeue the first kthread_work
work = list_first_entry(&worker->work_list,
struct kthread_work, node);
list_del_init(&work->node);
}
worker->current_work = work;
spin_unlock_irq(&worker->lock);
// Got a work item?
if (work) {
__set_current_state(TASK_RUNNING);
work->func(work); // run it — for SPI this calls spi_pump_messages
} else if (!freezing(current))
schedule(); // nothing to do: sleep until new work is queued
try_to_freeze();
goto repeat;
}
存在kthread_work,調用裡面的func函數,即spi_pump_messages:
/* Work-item handler: recover the owning spi_master and pump its queue. */
static void spi_pump_messages(struct kthread_work *work)
{
struct spi_master *master =
container_of(work, struct spi_master, pump_messages);
__spi_pump_messages(master, true);
}
/*
 * Core of the pump: if the queue is empty, release hardware resources;
 * otherwise take the first message, prepare the hardware/message, and hand
 * it to ->transfer_one_message (excerpt; "......" marks elided code).
 */
static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
{
unsigned long flags;
bool was_busy = false;
int ret;
......
/* No messages pending on master->queue (or queue stopped) */
if (list_empty(&master->queue) || !master->running) {
......
// Release hardware resources via master->unprepare_transfer_hardware
if (master->unprepare_transfer_hardware &&
master->unprepare_transfer_hardware(master))
dev_err(&master->dev,
"failed to unprepare transfer hardware\n");
......
return;
}
// Queue is non-empty: take the first spi_message
master->cur_msg =
list_first_entry(&master->queue, struct spi_message, queue);
......
// Acquire the hardware resources needed for the transfer
if (!was_busy && master->prepare_transfer_hardware) {
ret = master->prepare_transfer_hardware(master);
......
}
trace_spi_message_start(master->cur_msg);
// Let the driver pre-process the message via master->prepare_message
if (master->prepare_message) {
ret = master->prepare_message(master, master->cur_msg);
......
master->cur_msg_prepared = true;
}
......
// Finally transfer the message via master->transfer_one_message
ret = master->transfer_one_message(master, master->cur_msg);
......
}
前面分析spi_master_initialize_queue函數時,如果驅動未提供transfer_one_message則設定為spi_transfer_one_message,那麼就來分析這個函數:
/*
 * Generic per-message handler: assert chip select, send each spi_transfer in
 * the message through ->transfer_one, then finalize the message
 * (excerpt; "......" marks elided code).
 */
static int spi_transfer_one_message(struct spi_master *master,
struct spi_message *msg)
{
struct spi_transfer *xfer;
bool keep_cs = false;
int ret = 0;
unsigned long long ms = 1;
struct spi_statistics *statm = &master->statistics;
struct spi_statistics *stats = &msg->spi->statistics;
// Assert chip select
spi_set_cs(msg->spi, true);
......
// Walk every spi_transfer in the message and send it via master->transfer_one
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
trace_spi_transfer_start(msg, xfer);
......
if (xfer->tx_buf || xfer->rx_buf) {
reinit_completion(&master->xfer_completion);
ret = master->transfer_one(master, msg->spi, xfer);
......
}
......
out:
if (ret != 0 || !keep_cs)
spi_set_cs(msg->spi, false);
......
// Message transfer done; run completion handling
spi_finalize_current_message(master);
return ret;
}
spi_finalize_current_message:
/*
 * Complete the current message: let the driver unprepare it, re-queue the
 * pump so further messages get processed, and invoke the message's
 * completion callback.
 * NOTE(review): the elided "......" code assigns mesg = master->cur_msg;
 * without that assignment this excerpt would use mesg uninitialized.
 */
void spi_finalize_current_message(struct spi_master *master)
{
struct spi_message *mesg;
unsigned long flags;
int ret;
......
// Release per-message resources via master->unprepare_message
if (master->cur_msg_prepared && master->unprepare_message) {
ret = master->unprepare_message(master, mesg);
if (ret) {
dev_err(&master->dev,
"failed to unprepare message: %d\n", ret);
}
}
......
// Re-queue pump_messages so the worker checks for the next message
kthread_queue_work(&master->kworker, &master->pump_messages);
......
// Message done: invoke the spi_message's completion callback
if (mesg->complete)
mesg->complete(mesg->context);
}
對于裝置驅動程式來講,之後調用spi_sync或spi_async函數即可發起一個message請求,隊列化和工作線程被激活,觸發一系列的操作,最終完成message的傳輸操作。
/*
 * Asynchronously submit a message: validate it, then queue it under the
 * bus lock unless the bus is locked for exclusive use.
 */
int spi_async(struct spi_device *spi, struct spi_message *message)
{
struct spi_master *master = spi->master;
int ret;
unsigned long flags;
// Validate the spi_message and its transfers
ret = __spi_validate(spi, message);
if (ret != 0)
return ret;
spin_lock_irqsave(&master->bus_lock_spinlock, flags);
if (master->bus_lock_flag)
ret = -EBUSY;
else
ret = __spi_async(spi, message); // start the asynchronous transfer
spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
return ret;
}
/* Hand the message to the controller's ->transfer hook (excerpt). */
static int __spi_async(struct spi_device *spi, struct spi_message *message)
{
struct spi_master *master = spi->master;
message->spi = spi;
......
// Call master->transfer; with queueing enabled this was set to
// spi_queued_transfer by spi_master_initialize_queue
return master->transfer(spi, message);
}
spi_queued_transfer:
/* Queued ->transfer hook: enqueue the message and ask for a pump. */
static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
{
return __spi_queued_transfer(spi, msg, true);
}
/*
 * Append the message to the controller's queue and, if the controller is
 * idle, kick the worker thread to start pumping (excerpt).
 */
static int __spi_queued_transfer(struct spi_device *spi,
struct spi_message *msg,
bool need_pump)
{
struct spi_master *master = spi->master;
unsigned long flags;
spin_lock_irqsave(&master->queue_lock, flags);
......
// Link the spi_message onto master->queue
list_add_tail(&msg->queue, &master->queue);
if (!master->busy && need_pump)
/* Re-queue master->pump_messages on master->kworker
 * and wake the worker thread
 */
kthread_queue_work(&master->kworker, &master->pump_messages);
spin_unlock_irqrestore(&master->queue_lock, flags);
return 0;
}
最後總結一下spi_async函數執行流程圖,如下:
通用spi裝置驅動
核心提供了一個通用的SPI外設驅動,驅動檔案為driver/spi/spidev.c。
入口函數:
/*
 * Module init: register the char device, create the device class, and
 * register the spidev SPI driver — unwinding on each failure (excerpt).
 */
static int __init spidev_init(void)
{
int status;
......
// Register the char device (major 153); file_operations is spidev_fops
status = register_chrdev(SPIDEV_MAJOR, "spi", &spidev_fops);
if (status < 0)
return status;
// Create the "spidev" device class
spidev_class = class_create(THIS_MODULE, "spidev");
if (IS_ERR(spidev_class)) {
unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
return PTR_ERR(spidev_class);
}
// Register the SPI driver
status = spi_register_driver(&spidev_spi_driver);
if (status < 0) {
class_destroy(spidev_class);
unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
}
return status;
}
/* The spidev SPI driver: matches via DT/ACPI tables or the "spidev" name. */
static struct spi_driver spidev_spi_driver = {
.driver = {
.name = "spidev",
.of_match_table = of_match_ptr(spidev_dt_ids),
.acpi_match_table = ACPI_PTR(spidev_acpi_ids),
},
.probe = spidev_probe,
.remove = spidev_remove,
};
驅動與裝置比對,調用spidev_probe:
/* Per-device state for one spidev character device. */
struct spidev_data {
dev_t devt;
spinlock_t spi_lock;
struct spi_device *spi;
// Links this device into the global device_list
struct list_head device_entry;
struct mutex buf_lock;
unsigned users;
// Holds data copied in from user space for transmission
u8 *tx_buffer;
// Receives data read back from the device
u8 *rx_buffer;
u32 speed_hz;
};
/*
 * Probe: allocate and initialize per-device state, pick a free minor,
 * create the /dev/spidevB.C node, and register the device on the global
 * list (excerpt; "......" marks elided code).
 */
static int spidev_probe(struct spi_device *spi)
{
struct spidev_data *spidev;
int status;
unsigned long minor;
......
/* Allocate a spidev_data to hold this device's state */
spidev = kzalloc(sizeof(*spidev), GFP_KERNEL);
if (!spidev)
return -ENOMEM;
/* Initialize the spidev_data */
spidev->spi = spi;
spin_lock_init(&spidev->spi_lock);
mutex_init(&spidev->buf_lock);
INIT_LIST_HEAD(&spidev->device_entry);
mutex_lock(&device_list_lock);
minor = find_first_zero_bit(minors, N_SPI_MINORS);
if (minor < N_SPI_MINORS) {
struct device *dev;
spidev->devt = MKDEV(SPIDEV_MAJOR, minor);
// Create the device node, named spidev<bus>.<chip-select>
dev = device_create(spidev_class, &spi->dev, spidev->devt,
spidev, "spidev%d.%d",
spi->master->bus_num, spi->chip_select);
status = PTR_ERR_OR_ZERO(dev);
} else {
dev_dbg(&spi->dev, "no minor number available!\n");
status = -ENODEV;
}
if (status == 0) {
set_bit(minor, minors);
// Link this spidev_data into the global device_list
list_add(&spidev->device_entry, &device_list);
}
mutex_unlock(&device_list_lock);
spidev->speed_hz = spi->max_speed_hz;
if (status == 0)
spi_set_drvdata(spi, spidev);
else
kfree(spidev);
return status;
}
這個驅動程式為使用者空間提供了統一的接口:
/* File operations exposed to user space through the spidev char device. */
static const struct file_operations spidev_fops = {
.owner = THIS_MODULE,
.write = spidev_write,
.read = spidev_read,
.unlocked_ioctl = spidev_ioctl,
.compat_ioctl = spidev_compat_ioctl,
.open = spidev_open,
.release = spidev_release,
.llseek = no_llseek,
};
當應用層open時,會調用到spidev_fops的open函數:
/*
 * open(): find the spidev_data matching the inode's dev_t on the global
 * list, then lazily allocate the TX and RX buffers
 * (excerpt; "......" marks elided code).
 */
static int spidev_open(struct inode *inode, struct file *filp)
{
struct spidev_data *spidev;
int status = -ENXIO;
mutex_lock(&device_list_lock);
list_for_each_entry(spidev, &device_list, device_entry) {
if (spidev->devt == inode->i_rdev) {
status = 0;
break;
}
}
......
// Allocate the TX buffer (bufsiz bytes; 4096 by default)
if (!spidev->tx_buffer) {
spidev->tx_buffer = kmalloc(bufsiz, GFP_KERNEL);
if (!spidev->tx_buffer) {
dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
status = -ENOMEM;
goto err_find_dev;
}
}
// Allocate the RX buffer
if (!spidev->rx_buffer) {
spidev->rx_buffer = kmalloc(bufsiz, GFP_KERNEL);
if (!spidev->rx_buffer) {
dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
status = -ENOMEM;
goto err_alloc_rx_buf;
}
}
......
}
當應用層write時,會調用到spidev_fops的write函數進行發送資料:
/*
 * write(): copy user data into the TX buffer and send it with a
 * synchronous half-duplex transfer.
 */
static ssize_t
spidev_write(struct file *filp, const char __user *buf,
size_t count, loff_t *f_pos)
{
struct spidev_data *spidev;
ssize_t status = 0;
unsigned long missing;
/* chipselect only toggles at start or end of operation */
if (count > bufsiz)
return -EMSGSIZE;
spidev = filp->private_data;
mutex_lock(&spidev->buf_lock);
// Copy user data into the TX buffer
missing = copy_from_user(spidev->tx_buffer, buf, count);
if (missing == 0)
// Synchronous transmit
status = spidev_sync_write(spidev, count);
else
status = -EFAULT;
mutex_unlock(&spidev->buf_lock);
return status;
}
/*
 * Build a one-transfer spi_message around the TX buffer and send it
 * synchronously.
 */
static inline ssize_t
spidev_sync_write(struct spidev_data *spidev, size_t len)
{
// Describe the TX data as a single spi_transfer
struct spi_transfer t = {
.tx_buf = spidev->tx_buffer,
.len = len,
.speed_hz = spidev->speed_hz,
};
struct spi_message m;
spi_message_init(&m);
spi_message_add_tail(&t, &m);
// Issue the spi_message and wait for completion
return spidev_sync(spidev, &m);
}
進行複雜的資料傳輸時,通過ioctl指令,如下使用例子:
struct spi_ioc_transfer xfer;
......
// Fill in xfer (buffers, length, speed, ...)
// Perform the transfer(s) in one ioctl call
ioctl(fd, SPI_IOC_MESSAGE(2), xfer);
......
struct spi_ioc_transfer結構體跟struct spi_transfer結構體類似,定義如下:
/*
 * User-space description of one SPI transfer, mirroring struct spi_transfer.
 * tx_buf/rx_buf carry user-space pointers as 64-bit values so the layout is
 * the same for 32- and 64-bit user space.
 */
struct spi_ioc_transfer {
__u64 tx_buf;
__u64 rx_buf;
__u32 len;
__u32 speed_hz;
__u16 delay_usecs;
__u8 bits_per_word;
__u8 cs_change;
__u8 tx_nbits;
__u8 rx_nbits;
__u16 pad;
};
當應用層ioctl時,會調用到spidev_fops的spidev_ioctl函數,下面分析通過ioctl指令進行資料傳輸的流程:
/*
 * ioctl(): for SPI_IOC_MESSAGE(n) requests, copy the spi_ioc_transfer array
 * in from user space, build a spi_message from it, and execute it
 * synchronously (excerpt; "......" marks elided code).
 */
static long
spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
int err = 0;
int retval = 0;
struct spidev_data *spidev;
struct spi_device *spi;
u32 tmp;
unsigned n_ioc;
struct spi_ioc_transfer *ioc;
......
switch (cmd) {
......
default:
// Copy the user-supplied spi_ioc_transfer array into kernel space
ioc = spidev_get_ioc_message(cmd,
(struct spi_ioc_transfer __user *)arg, &n_ioc);
......
// Build a spi_message from the spi_ioc_transfer array; ends up calling spi_sync
retval = spidev_message(spidev, ioc, n_ioc);
kfree(ioc);
break;
}
......
}