
Linux phy-napi

Overview:

       NAPI is Linux's newer API for NIC packet processing; the story goes that no better name could be found, so it was simply called NAPI (New API). It was introduced after 2.5. In short, NAPI combines the interrupt approach with the polling approach. Interrupts respond promptly, and with light traffic they do not cost much CPU time; the drawback is that heavy traffic generates a flood of interrupts, each of which burns a fair amount of CPU time, so overall efficiency can end up worse than polling. Polling is the opposite: it handles heavy traffic well, because each pass processes many packets without per-packet interrupt overhead; the drawback is that it occupies CPU time even when little or no data is arriving.

       NAPI combines the two: interrupts under light load, polling under heavy load. Normally the device runs in interrupt mode. When data arrives, the interrupt handler is triggered; it disables RX interrupts and starts processing. Any data that arrives in the meantime needs no further interrupt, because the handler keeps polling until there is no new data, and only then re-enables interrupts. At very light and very heavy loads NAPI therefore delivers the advantages of both modes and performs well. If traffic is unstable, hovering between the two regimes, NAPI instead spends considerable time switching between the modes and efficiency drops somewhat.
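As a roadmap for the five steps below, here is a minimal sketch of where the NAPI calls sit in a typical driver. All my_* names are hypothetical placeholders, not the nuc970 code; the my_hw_* helpers stand in for device-specific register accesses.

#include <linux/netdevice.h>
#include <linux/interrupt.h>

struct my_priv {
  struct napi_struct napi;
  /* rings, registers, ... */
};

/* hypothetical device-specific helpers */
static void my_hw_mask_rx_irq(struct my_priv *priv);
static void my_hw_unmask_rx_irq(struct my_priv *priv);
static int my_hw_rx(struct my_priv *priv, int budget);

/* step 3: the RX interrupt hands the work to NAPI and goes quiet */
static irqreturn_t my_isr(int irq, void *dev_id)
{
  struct my_priv *priv = dev_id;

  my_hw_mask_rx_irq(priv);       /* no more RX interrupts while polling */
  napi_schedule(&priv->napi);
  return IRQ_HANDLED;
}

/* step 4: the poll callback consumes up to budget packets per call */
static int my_poll(struct napi_struct *napi, int budget)
{
  struct my_priv *priv = container_of(napi, struct my_priv, napi);
  int done = my_hw_rx(priv, budget);

  if (done < budget) {           /* ring drained: back to interrupt mode */
    napi_complete(napi);
    my_hw_unmask_rx_irq(priv);
  }
  return done;                   /* done == budget: poll is called again */
}

/* steps 1, 2 and 5: netif_napi_add() in probe, napi_enable() in open,
 * napi_disable() in close — each covered in a section below. */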

1. Adding phy-napi

The nuc970 driver registers its NAPI instance at probe time with netif_napi_add(), passing the poll callback nuc970_poll and a weight of 16:

static int nuc970_ether_probe(struct platform_device *pdev)
{
  ......
  netif_napi_add(dev, &ether->napi, nuc970_poll, 16);
  ......
}
void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
        int (*poll)(struct napi_struct *, int), int weight)
{
  INIT_LIST_HEAD(&napi->poll_list); // initialize the poll list
  napi->gro_count = 0;
  napi->gro_list = NULL;
  napi->skb = NULL;
  napi->poll = poll;
  if (weight > NAPI_POLL_WEIGHT)
    pr_err_once("netif_napi_add() called with weight %d on device %s\n",
          weight, dev->name);
  napi->weight = weight;
  list_add(&napi->dev_list, &dev->napi_list);
  napi->dev = dev;
#ifdef CONFIG_NETPOLL
  spin_lock_init(&napi->poll_lock);
  napi->poll_owner = -1;
#endif
  set_bit(NAPI_STATE_SCHED, &napi->state); // start with NAPI_STATE_SCHED set; napi_enable() will clear it
}
EXPORT_SYMBOL(netif_napi_add);      
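Two details are worth noting. The weight check is only a warning: weights above NAPI_POLL_WEIGHT (64) draw a one-time pr_err, so the 16 used by the nuc970 driver is fine. And the function deliberately leaves NAPI_STATE_SCHED set, so the instance cannot be scheduled until napi_enable() clears the bit in the next step. A sketch of the usual probe-time ordering, again with hypothetical my_* names:

#include <linux/etherdevice.h>
#include <linux/platform_device.h>

static int my_probe(struct platform_device *pdev)
{
  struct net_device *ndev;
  struct my_priv *priv;
  int ret;

  ndev = alloc_etherdev(sizeof(*priv));
  if (!ndev)
    return -ENOMEM;
  priv = netdev_priv(ndev);

  /* register the poll callback before the interface can be opened */
  netif_napi_add(ndev, &priv->napi, my_poll, 16);

  ret = register_netdev(ndev);
  if (ret) {
    netif_napi_del(&priv->napi);
    free_netdev(ndev);
  }
  return ret;
}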

2. Enabling phy-napi

When the interface is brought up, nuc970_ether_open() arms the instance with napi_enable():

static int nuc970_ether_open(struct net_device *dev)
{
  ......
  napi_enable(&ether->napi);
  ......
}      
static inline void napi_enable(struct napi_struct *n)
{
  BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
  smp_mb__before_clear_bit();
  clear_bit(NAPI_STATE_SCHED, &n->state);
}      
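napi_enable() is the counterpart of the set_bit() at the end of netif_napi_add(): clearing NAPI_STATE_SCHED is what actually arms the instance, because napi_schedule_prep() fails while the bit is set. In open(), NAPI should therefore be enabled before RX interrupts are unmasked in hardware, so that the first interrupt finds a schedulable instance. A sketch with hypothetical my_* names:

static int my_open(struct net_device *ndev)
{
  struct my_priv *priv = netdev_priv(ndev);

  napi_enable(&priv->napi);   /* clears NAPI_STATE_SCHED: instance armed */
  my_hw_unmask_rx_irq(priv);  /* hypothetical: RX interrupts may now fire */
  netif_start_queue(ndev);
  return 0;
}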

3. Scheduling phy-napi

The RX interrupt handler hands reception over to NAPI with napi_schedule():

static irqreturn_t nuc970_rx_interrupt(int irq, void *dev_id)
{
  ......
  napi_schedule(&ether->napi);
  ......
}      
static inline void napi_schedule(struct napi_struct *n)
{
  if (napi_schedule_prep(n))
    __napi_schedule(n);
}      
static inline bool napi_schedule_prep(struct napi_struct *n)
{
  return !napi_disable_pending(n) &&
    !test_and_set_bit(NAPI_STATE_SCHED, &n->state);
}      
void __napi_schedule(struct napi_struct *n)
{
  unsigned long flags;

  local_irq_save(flags);
  ____napi_schedule(&__get_cpu_var(softnet_data), n);
  local_irq_restore(flags);
}
EXPORT_SYMBOL(__napi_schedule);      
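____napi_schedule() (not shown above) appends the instance to the current CPU's softnet_data poll_list and raises NET_RX_SOFTIRQ; the softirq handler net_rx_action() later calls the driver's poll callback. Interrupts are disabled around it because the poll list is also manipulated from softirq context on the same CPU. Some drivers call napi_schedule_prep() and __napi_schedule() separately, so that the RX interrupt is masked only when NAPI actually takes over; a sketch with hypothetical my_* names:

static irqreturn_t my_rx_isr(int irq, void *dev_id)
{
  struct my_priv *priv = dev_id;

  if (napi_schedule_prep(&priv->napi)) {
    my_hw_mask_rx_irq(priv);    /* hypothetical: quiet until poll finishes */
    __napi_schedule(&priv->napi);
  }
  return IRQ_HANDLED;
}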

4. Completing the poll

Once nuc970_poll() has drained the RX ring, it takes the instance back off the poll list with __napi_complete():

static int nuc970_poll(struct napi_struct *napi, int budget)
{
  ......
  __napi_complete(napi);
  ......
}
void __napi_complete(struct napi_struct *n)
{
  BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
  BUG_ON(n->gro_list);

  list_del(&n->poll_list);
  smp_mb__before_clear_bit();
  clear_bit(NAPI_STATE_SCHED, &n->state);
}
EXPORT_SYMBOL(__napi_complete);      
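__napi_complete() removes the instance from the per-CPU poll list without taking any lock, so the caller must run with local interrupts disabled, and it may only be called when fewer than budget packets were processed (in kernels of this vintage the napi_complete() wrapper adds the GRO flush and the interrupt protection). A sketch of the completion path inside the poll callback, expanding the my_poll roadmap above:

static int my_poll(struct napi_struct *napi, int budget)
{
  struct my_priv *priv = container_of(napi, struct my_priv, napi);
  int work_done = my_hw_rx(priv, budget);  /* hypothetical RX processing */

  if (work_done < budget) {
    unsigned long flags;

    local_irq_save(flags);      /* __napi_complete touches the per-CPU list */
    __napi_complete(napi);
    local_irq_restore(flags);
    my_hw_unmask_rx_irq(priv);  /* back to interrupt mode */
  }
  return work_done;             /* == budget keeps us on the poll list */
}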

5. Disabling phy-napi

When the interface goes down, nuc970_ether_close() shuts polling off with napi_disable():

static int nuc970_ether_close(struct net_device *dev)
{
  ......
  napi_disable(&ether->napi);
  ......
}
static inline void napi_disable(struct napi_struct *n)
{
  set_bit(NAPI_STATE_DISABLE, &n->state);
  while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
    msleep(1);
  clear_bit(NAPI_STATE_DISABLE, &n->state);
}      
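napi_disable() first sets NAPI_STATE_DISABLE so that napi_schedule_prep() starts failing, then sleeps in 1 ms steps until it can claim NAPI_STATE_SCHED itself, i.e. until any in-flight poll has finished. It returns with NAPI_STATE_SCHED left set, restoring the parked state the instance had right after netif_napi_add(). A sketch of the close path, with hypothetical my_* names:

static int my_close(struct net_device *ndev)
{
  struct my_priv *priv = netdev_priv(ndev);

  my_hw_mask_rx_irq(priv);    /* hypothetical: stop raising new RX interrupts */
  napi_disable(&priv->napi);  /* waits for any running poll to finish */
  netif_stop_queue(ndev);
  return 0;
}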
