[Open-FCoE] [RFC PATCH 4/4] bnx2fc: use fcoe_percpu structure and related functions

Bhanu Gollapudi bprakash at broadcom.com
Fri Jan 21 02:56:31 UTC 2011


Now that fcoe_percpu_s and the skb queue handling functions have been
moved to libfcoe, remove the duplicated code from bnx2fc and use the
shared versions instead.

Signed-off-by: Nithin Nayak Sujir <nsujir at broadcom.com>
Signed-off-by: Bhanu Prakash Gollapudi <bprakash at broadcom.com>
---
 drivers/scsi/bnx2fc/bnx2fc.h      |   11 +--
 drivers/scsi/bnx2fc/bnx2fc_fcoe.c |  145 ++++++++-----------------------------
 drivers/scsi/bnx2fc/bnx2fc_hwi.c  |   12 ++--
 3 files changed, 39 insertions(+), 129 deletions(-)
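
For reference, here is a minimal sketch (not part of the patch; the
sketch_* names are hypothetical) of the usage pattern this conversion
ends up with.  bnx2fc keeps a single global fcoe_percpu_s rather than a
true per-CPU instance, so the CRC/EOF page bookkeeping done by the
shared fcoe_get_paged_crc_eof() helper is serialized with an explicit
spinlock, and received L2 frames are handed to the one receive thread
through the shared fcoe_rx_list:

#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/sched.h>
#include <scsi/libfcoe.h>

static struct fcoe_percpu_s sketch_global;	/* single shared instance */
static DEFINE_SPINLOCK(sketch_global_lock);	/* guards crc_eof page state */

/* Append the FCoE CRC/EOF trailer fragment via the shared helper. */
static int sketch_add_crc_eof(struct sk_buff *skb, int tlen)
{
	int rc;

	spin_lock(&sketch_global_lock);
	rc = fcoe_get_paged_crc_eof(skb, tlen, &sketch_global);
	spin_unlock(&sketch_global_lock);

	return rc;
}

/* Hand a received L2 frame to the single receive thread. */
static void sketch_queue_rx(struct sk_buff *skb)
{
	struct fcoe_percpu_s *bg = &sketch_global;

	spin_lock_bh(&bg->fcoe_rx_list.lock);
	__skb_queue_tail(&bg->fcoe_rx_list, skb);
	if (bg->fcoe_rx_list.qlen == 1)
		wake_up_process(bg->thread);
	spin_unlock_bh(&bg->fcoe_rx_list.lock);
}

The spinlock is only needed because bnx2fc shares one fcoe_percpu_s
across CPUs for its L2 path; the per-CPU I/O threads stay in bnx2fc's
own bnx2fc_percpu_s, whose thread pointer is renamed to iothread in the
patch below so it is not confused with the shared structure's thread
field.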

diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index 95f9a5c..f768082 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -134,16 +134,11 @@
 
 #define BNX2FC_RNID_HBA			0x7
 
-struct bnx2fc_global_s {
-	struct task_struct *l2_thread;
-	struct sk_buff_head fcoe_rx_list;
-	struct page *crc_eof_page;
-	int crc_eof_offset;
-};
-extern struct bnx2fc_global_s bnx2fc_global;
+/* bnx2fc driver uses only one instance of fcoe_percpu_s */
+extern struct fcoe_percpu_s bnx2fc_global;
 
 struct bnx2fc_percpu_s {
-	struct task_struct *thread;
+	struct task_struct *iothread;
 	struct list_head work_list;
 	spinlock_t fp_work_lock;
 };
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index e9af99b..68754ab 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -34,13 +34,16 @@ MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 FCoE Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_MODULE_VERSION);
 
-#define FCOE_LOW_QUEUE_DEPTH	32
 #define FCOE_WORD_TO_BYTE  4
 
 static struct scsi_transport_template	*bnx2fc_transport_template;
 static struct scsi_transport_template	*bnx2fc_vport_xport_template;
 
-struct bnx2fc_global_s bnx2fc_global;
+/* The bnx2fc driver needs only one instance of the fcoe_percpu_s structure.
+ * The I/O threads are per-CPU, but there is only one L2 thread.
+ */
+struct fcoe_percpu_s bnx2fc_global;
+DEFINE_SPINLOCK(bnx2fc_global_lock);
 
 static struct cnic_ulp_ops bnx2fc_cnic_cb;
 static struct libfc_function_template bnx2fc_libfc_fcn_templ;
@@ -55,7 +58,6 @@ static int bnx2fc_disable(struct net_device *netdev);
 static void bnx2fc_recv_frame(struct sk_buff *skb);
 
 static void bnx2fc_start_disc(struct bnx2fc_hba *hba);
-static void bnx2fc_check_wait_queue(struct fc_lport *lp, struct sk_buff *skb);
 static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev);
 static int bnx2fc_net_config(struct fc_lport *lp);
 static int bnx2fc_lport_config(struct fc_lport *lport);
@@ -89,11 +91,9 @@ static struct notifier_block bnx2fc_cpu_notifier = {
 	.notifier_call = bnx2fc_cpu_callback,
 };
 
-#define FCOE_MAX_QUEUE_DEPTH 256
-
 static void bnx2fc_clean_rx_queue(struct fc_lport *lp)
 {
-	struct bnx2fc_global_s *bg;
+	struct fcoe_percpu_s *bg;
 	struct fcoe_rcv_info *fr;
 	struct sk_buff_head *list;
 	struct sk_buff *skb, *next;
@@ -115,99 +115,14 @@ static void bnx2fc_clean_rx_queue(struct fc_lport *lp)
 	spin_unlock_bh(&bg->fcoe_rx_list.lock);
 }
 
-static void bnx2fc_queue_timer(ulong lport)
-{
-	bnx2fc_check_wait_queue((struct fc_lport *) lport, NULL);
-}
-
-static void bnx2fc_clean_pending_queue(struct fc_lport *lp)
-{
-	struct fcoe_port *port;
-	struct sk_buff *skb;
-	port = lport_priv(lp);
-
-	spin_lock_bh(&port->fcoe_pending_queue.lock);
-	while ((skb = __skb_dequeue(&port->fcoe_pending_queue)) != NULL) {
-		spin_unlock_bh(&port->fcoe_pending_queue.lock);
-		kfree_skb(skb);
-		spin_lock_bh(&port->fcoe_pending_queue.lock);
-	}
-	spin_unlock_bh(&port->fcoe_pending_queue.lock);
-}
-
-static void bnx2fc_check_wait_queue(struct fc_lport *lp, struct sk_buff *skb)
-{
-	struct fcoe_port *port = lport_priv(lp);
-	int rc = 0;
-
-	spin_lock_bh(&port->fcoe_pending_queue.lock);
-
-	if (skb)
-		__skb_queue_tail(&port->fcoe_pending_queue, skb);
-
-	if (port->fcoe_pending_queue_active)
-		goto out;
-	port->fcoe_pending_queue_active = 1;
-
-	while (port->fcoe_pending_queue.qlen) {
-		/* keep qlen > 0 until bnx2fc_start_io succeeds */
-		port->fcoe_pending_queue.qlen++;
-		skb = __skb_dequeue(&port->fcoe_pending_queue);
-
-		spin_unlock_bh(&port->fcoe_pending_queue.lock);
-		rc = fcoe_start_io(skb);
-		spin_lock_bh(&port->fcoe_pending_queue.lock);
-
-		if (rc) {
-			__skb_queue_head(&port->fcoe_pending_queue, skb);
-			/* undo temporary increment above */
-			port->fcoe_pending_queue.qlen--;
-			break;
-		}
-		/* undo temporary increment above */
-		port->fcoe_pending_queue.qlen--;
-	}
-
-	if (port->fcoe_pending_queue.qlen < FCOE_LOW_QUEUE_DEPTH)
-		lp->qfull = 0;
-	if (port->fcoe_pending_queue.qlen && !timer_pending(&port->timer))
-		mod_timer(&port->timer, jiffies + 2);
-	port->fcoe_pending_queue_active = 0;
-out:
-	if (port->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
-		lp->qfull = 1;
-	spin_unlock_bh(&port->fcoe_pending_queue.lock);
-}
-
 int bnx2fc_get_paged_crc_eof(struct sk_buff *skb, int tlen)
 {
-	struct bnx2fc_global_s *bg;
-	struct page *page;
-
-	bg = &bnx2fc_global;
-	page = bg->crc_eof_page;
-	if (!page) {
-		page = alloc_page(GFP_ATOMIC);
-		if (!page)
-			return -ENOMEM;
-		bg->crc_eof_page = page;
-		bg->crc_eof_offset = 0;
-	}
-
-	get_page(page);
-	skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page,
-			   bg->crc_eof_offset, tlen);
-	skb->len += tlen;
-	skb->data_len += tlen;
-	skb->truesize += tlen;
-	bg->crc_eof_offset += sizeof(struct fcoe_crc_eof);
+	int rc;
+	spin_lock(&bnx2fc_global_lock);
+	rc = fcoe_get_paged_crc_eof(skb, tlen, &bnx2fc_global);
+	spin_unlock(&bnx2fc_global_lock);
 
-	if (bg->crc_eof_offset >= PAGE_SIZE) {
-		bg->crc_eof_page = NULL;
-		bg->crc_eof_offset = 0;
-		put_page(page);
-	}
-	return 0;
+	return rc;
 }
 
 static void bnx2fc_abort_io(struct fc_lport *lport)
@@ -434,9 +349,9 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
 	/* send down to lld */
 	fr_dev(fp) = lport;
 	if (port->fcoe_pending_queue.qlen)
-		bnx2fc_check_wait_queue(lport, skb);
+		fcoe_check_wait_queue(lport, skb);
 	else if (fcoe_start_io(skb))
-		bnx2fc_check_wait_queue(lport, skb);
+		fcoe_check_wait_queue(lport, skb);
 
 	return 0;
 }
@@ -458,7 +373,7 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
 	struct bnx2fc_hba *hba;
 	struct fc_frame_header *fh;
 	struct fcoe_rcv_info *fr;
-	struct bnx2fc_global_s *bg;
+	struct fcoe_percpu_s *bg;
 	unsigned short oxid;
 
 	hba = container_of(ptype, struct bnx2fc_hba, fcoe_packet_type);
@@ -496,7 +411,7 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
 
 	__skb_queue_tail(&bg->fcoe_rx_list, skb);
 	if (bg->fcoe_rx_list.qlen == 1)
-		wake_up_process(bg->l2_thread);
+		wake_up_process(bg->thread);
 
 	spin_unlock_bh(&bg->fcoe_rx_list.lock);
 
@@ -508,7 +423,7 @@ err:
 
 static int bnx2fc_l2_rcv_thread(void *arg)
 {
-	struct bnx2fc_global_s *bg = arg;
+	struct fcoe_percpu_s *bg = arg;
 	struct sk_buff *skb;
 
 	while (!kthread_should_stop()) {
@@ -808,7 +723,7 @@ static int bnx2fc_net_config(struct fc_lport *lport)
 
 	skb_queue_head_init(&port->fcoe_pending_queue);
 	port->fcoe_pending_queue_active = 0;
-	setup_timer(&port->timer, bnx2fc_queue_timer, (unsigned long) lport);
+	setup_timer(&port->timer, fcoe_queue_timer, (unsigned long) lport);
 
 	if (!lport->vport) {
 		wwnn = fcoe_wwn_from_mac(hba->ctlr.ctl_src_addr, 1, 0);
@@ -925,7 +840,7 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event)
 			per_cpu_ptr(lport->dev_stats,
 				    get_cpu())->LinkFailureCount++;
 			put_cpu();
-			bnx2fc_clean_pending_queue(lport);
+			fcoe_clean_pending_queue(lport);
 
 			init_waitqueue_head(&hba->shutdown_wait);
 			BNX2FC_HBA_DBG(lport, "indicate_netevent "
@@ -1441,7 +1356,7 @@ static void bnx2fc_if_destroy(struct fc_lport *lport)
 	del_timer_sync(&port->timer);
 
 	/* Free existing transmit skbs */
-	bnx2fc_clean_pending_queue(lport);
+	fcoe_clean_pending_queue(lport);
 
 	bnx2fc_interface_put(hba);
 
@@ -1893,7 +1808,7 @@ static int bnx2fc_disable(struct net_device *netdev)
 		printk(KERN_ERR PFX "bnx2fc_disable: hba not found\n");
 	} else {
 		fcoe_ctlr_link_down(&hba->ctlr);
-		bnx2fc_clean_pending_queue(hba->ctlr.lp);
+		fcoe_clean_pending_queue(hba->ctlr.lp);
 	}
 
 nodev:
@@ -2225,9 +2140,9 @@ static void bnx2fc_percpu_thread_create(unsigned int cpu)
 				(void *)p,
 				"bnx2fc_thread/%d", cpu);
 	/* bind thread to the cpu */
-	if (likely(!IS_ERR(p->thread))) {
+	if (likely(!IS_ERR(p->iothread))) {
 		kthread_bind(thread, cpu);
-		p->thread = thread;
+		p->iothread = thread;
 		wake_up_process(thread);
 	}
 }
@@ -2244,8 +2159,8 @@ static void bnx2fc_percpu_thread_destroy(unsigned int cpu)
 	/* Prevent any new work from being queued for this CPU */
 	p = &per_cpu(bnx2fc_percpu, cpu);
 	spin_lock_bh(&p->fp_work_lock);
-	thread = p->thread;
-	p->thread = NULL;
+	thread = p->iothread;
+	p->iothread = NULL;
 
 
 	/* Free all work in the list */
@@ -2302,7 +2217,7 @@ static int bnx2fc_cpu_callback(struct notifier_block *nfb,
  **/
 static int __init bnx2fc_mod_init(void)
 {
-	struct bnx2fc_global_s *bg;
+	struct fcoe_percpu_s *bg;
 	struct task_struct *l2_thread;
 	int rc = 0;
 	unsigned int cpu = 0;
@@ -2333,14 +2248,14 @@ static int __init bnx2fc_mod_init(void)
 	l2_thread = kthread_create(bnx2fc_l2_rcv_thread,
 				   (void *)bg,
 				   "bnx2fc_l2_thread");
-	if (IS_ERR(bg->l2_thread)) {
+	if (IS_ERR(bg->thread)) {
 		rc = PTR_ERR(l2_thread);
 		fcoe_transport_detach(&bnx2fc_transport);
 		return rc;
 	}
 	wake_up_process(l2_thread);
 	spin_lock_bh(&bg->fcoe_rx_list.lock);
-	bg->l2_thread = l2_thread;
+	bg->thread = l2_thread;
 	spin_unlock_bh(&bg->fcoe_rx_list.lock);
 
 	for_each_possible_cpu(cpu) {
@@ -2366,7 +2281,7 @@ static void __exit bnx2fc_mod_exit(void)
 {
 	LIST_HEAD(to_be_deleted);
 	struct bnx2fc_hba *hba, *next;
-	struct bnx2fc_global_s *bg;
+	struct fcoe_percpu_s *bg;
 	struct task_struct *l2_thread;
 	struct sk_buff *skb;
 	unsigned int cpu = 0;
@@ -2400,8 +2315,8 @@ static void __exit bnx2fc_mod_exit(void)
 	/* Destroy global thread */
 	bg = &bnx2fc_global;
 	spin_lock_bh(&bg->fcoe_rx_list.lock);
-	l2_thread = bg->l2_thread;
-	bg->l2_thread = NULL;
+	l2_thread = bg->thread;
+	bg->thread = NULL;
 	while ((skb = __skb_dequeue(&bg->fcoe_rx_list)) != NULL)
 		kfree_skb(skb);
 
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index cf56e44..0a0754d 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -492,7 +492,7 @@ void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
 	struct fcoe_hdr *hp;
 	struct fcoe_crc_eof *cp;
 	struct fcoe_rcv_info *fr;
-	struct bnx2fc_global_s *bg;
+	struct fcoe_percpu_s *bg;
 
 	struct sk_buff *skb;
 	unsigned int hlen;	/* header length implies the version */
@@ -610,7 +610,7 @@ void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
 		bg = &bnx2fc_global;
 		spin_lock_bh(&bg->fcoe_rx_list.lock);
 
-		if (unlikely(!bg->l2_thread)) {
+		if (unlikely(!bg->thread)) {
 			spin_unlock_bh(&bg->fcoe_rx_list.lock);
 			printk(KERN_ERR PFX "ERROR-thread is NULL\n");
 			kfree_skb(skb);
@@ -618,7 +618,7 @@ void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
 		}
 		__skb_queue_tail(&bg->fcoe_rx_list, skb);
 		if (bg->fcoe_rx_list.qlen == 1)
-			wake_up_process(bg->l2_thread);
+			wake_up_process(bg->thread);
 		spin_unlock_bh(&bg->fcoe_rx_list.lock);
 	} else {
 		BNX2FC_HBA_DBG(lport, "fh_r_ctl = 0x%x\n", fh->fh_r_ctl);
@@ -922,7 +922,7 @@ int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
 
 				fps = &per_cpu(bnx2fc_percpu, cpu);
 				spin_lock_bh(&fps->fp_work_lock);
-				if (unlikely(!fps->thread))
+				if (unlikely(!fps->iothread))
 					goto unlock;
 
 				work =  bnx2fc_alloc_work(tgt, wqe);
@@ -934,8 +934,8 @@ unlock:
 				spin_unlock_bh(&fps->fp_work_lock);
 
 				/* Pending work request completion */
-				if (fps->thread && work)
-					wake_up_process(fps->thread);
+				if (fps->iothread && work)
+					wake_up_process(fps->iothread);
 				else
 					bnx2fc_process_cq_compl(tgt, wqe);
 			}
-- 
1.7.0.6