Date:      Sat, 28 Apr 2007 08:30:39 GMT
From:      Kip Macy <kmacy@FreeBSD.org>
To:        Perforce Change Reviews <perforce@freebsd.org>
Subject:   PERFORCE change 118905 for review
Message-ID:  <200704280830.l3S8UdgM048387@repoman.freebsd.org>

http://perforce.freebsd.org/chv.cgi?CH=118905

Change 118905 by kmacy@kmacy_vt-x:opentoe_init on 2007/04/28 08:30:32

	start making TOE code BSD palatable
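
	A recurring Linux-to-FreeBSD mapping runs through the hunks below;
	a minimal summary sketch of the correspondences being applied
	(gathered from this change, not exhaustive):

		struct sk_buff *skb;	/* -> struct mbuf *m;    */
		struct sock *sk;	/* -> struct socket *so; */
		spinlock_t lock;	/* -> struct mtx lock;   */
		spin_lock_bh(&l);	/* -> mtx_lock(&l);      */
		kfree_skb(skb);		/* -> m_freem(m);        */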

Affected files ...

.. //depot/projects/opentoe/sys/dev/cxgb/cxgb_adapter.h#15 edit
.. //depot/projects/opentoe/sys/dev/cxgb/ulp/t3_tom/t3_cpl_io.c#2 edit
.. //depot/projects/opentoe/sys/dev/cxgb/ulp/t3_tom/t3_ddp.c#2 edit
.. //depot/projects/opentoe/sys/dev/cxgb/ulp/t3_tom/t3_listen.c#2 edit
.. //depot/projects/opentoe/sys/dev/cxgb/ulp/t3_tom/t3_tom.c#2 edit
.. //depot/projects/opentoe/sys/dev/cxgb/ulp/t3_tom/t3_tom.h#2 edit

Differences ...

==== //depot/projects/opentoe/sys/dev/cxgb/cxgb_adapter.h#15 (text+ko) ====

@@ -71,6 +71,7 @@
 	struct mbuf *head;
 	struct mbuf *tail;
 	uint32_t     qlen;
+	struct mtx   lock;
 };
 
 static __inline void
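
A note on the new lock field: a struct mtx has no static initializer and must
be set up with mtx_init(9) before first use.  A minimal sketch, assuming a
queue-setup helper (the struct and function names here are hypothetical):

	static __inline void
	mbufq_init(struct mbuf_queue *q)
	{
		mtx_init(&q->lock, "mbufq", NULL, MTX_DEF);	/* MTX_DEF: sleep mutex */
		q->head = q->tail = NULL;
		q->qlen = 0;
	}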

==== //depot/projects/opentoe/sys/dev/cxgb/ulp/t3_tom/t3_cpl_io.c#2 (text+ko) ====

@@ -48,12 +48,12 @@
 const unsigned int t3_ulp_extra_len[] = {0, 4, 4, 8};
 
 /*
- * This sk_buff holds a fake header-only TCP segment that we use whenever we
+ * This mbuf holds a fake header-only TCP segment that we use whenever we
  * need to exploit SW TCP functionality that expects TCP headers, such as
  * tcp_create_openreq_child().  It's a RO buffer that may be used by multiple
  * CPUs without locking.
  */
-static struct sk_buff *tcphdr_skb __read_mostly;
+static struct mbuf *tcphdr_skb __read_mostly;
 
 /*
  * The number of WRs needed for an skb depends on the number of page fragments
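
On FreeBSD the shared header-only segment could be built once at load time
with the stock mbuf(9) allocators.  A hedged sketch (the initialization below
is an assumption, not part of this change):

	static struct mbuf *tcphdr_mbuf;	/* hypothetical BSD-side name */

	static void
	init_tcphdr_mbuf(void)
	{
		tcphdr_mbuf = m_gethdr(M_WAITOK, MT_DATA);
		tcphdr_mbuf->m_len = sizeof(struct tcphdr);
		tcphdr_mbuf->m_pkthdr.len = sizeof(struct tcphdr);
		bzero(mtod(tcphdr_mbuf, void *), sizeof(struct tcphdr));
	}
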
@@ -84,29 +84,30 @@
  * call to the handler.  Should be used if the handler may drop a socket
  * reference.
  */
-static inline void process_cpl_msg_ref(void (*fn)(struct sock *,
-						  struct sk_buff *),
-				       struct sock *sk, struct sk_buff *skb)
+static inline void
+process_cpl_msg_ref(void (*fn)(struct socket *, struct mbuf *),
+    struct socket *so, struct mbuf *m)
 {
 	sock_hold(sk);
 	process_cpl_msg(fn, sk, skb);
 	sock_put(sk);
 }
 
-static inline int is_t3a(const struct toedev *dev)
+static inline int
+is_t3a(const struct toedev *dev)
 {
 	return dev->ttid == TOE_ID_CHELSIO_T3;
 }
 
 /*
- * Returns an sk_buff for a reply CPL message of size len.  If the input
- * sk_buff has no other users it is trimmed and reused, otherwise a new buffer
+ * Returns an mbuf for a reply CPL message of size len.  If the input
+ * mbuf has no other users it is trimmed and reused, otherwise a new buffer
  * is allocated.  The input skb must be of size at least len.  Note that this
  * operation does not destroy the original skb data even if it decides to reuse
  * the buffer.
  */
-static struct sk_buff *get_cpl_reply_skb(struct sk_buff *skb, size_t len,
-					 int gfp)
+static struct mbuf *
+get_cpl_reply_mbuf(struct mbuf *m, size_t len, int gfp)
 {
 	if (likely(!skb_cloned(skb))) {
 		BUG_ON(skb->len < len);
@@ -123,8 +124,8 @@
 /*
  * Like get_cpl_reply_skb() but the returned buffer starts out empty.
  */
-static struct sk_buff *__get_cpl_reply_skb(struct sk_buff *skb, size_t len,
-					   int gfp)
+static struct mbuf *
+__get_cpl_reply_mbuf(struct mbuf *m, size_t len)
 {
 	if (likely(!skb_cloned(skb) && !skb->data_len)) {
 		__skb_trim(skb, 0);
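
A FreeBSD-flavored sketch of the reuse-or-allocate pattern described above,
assuming M_WRITABLE() is an acceptable stand-in for the Linux clone check:

	static struct mbuf *
	get_cpl_reply_mbuf(struct mbuf *m, size_t len)
	{
		if (M_WRITABLE(m) && m->m_len >= len) {
			m->m_len = len;			/* trim in place and reuse */
			m->m_pkthdr.len = len;
			return (m);
		}
		return (m_gethdr(M_NOWAIT, MT_DATA));	/* may be NULL; caller checks */
	}
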
@@ -141,8 +142,9 @@
  * If through_l2t is set the message is subject to ARP processing, otherwise
  * it is sent directly.
  */
-static inline void send_or_defer(struct sock *sk, struct tcp_sock *tp,
-				 struct sk_buff *skb, int through_l2t)
+static inline void
+send_or_defer(struct socket *so, struct tcp_sock *tp, struct mbuf *skb,
+    int through_l2t)
 {
 	if (unlikely(sk->sk_state == TCP_SYN_SENT))
 		__skb_queue_tail(&tp->out_of_order_queue, skb);  // defer
@@ -157,7 +159,8 @@
  * whether the packet should use a control Tx queue, bits 1..3 determine
  * the queue set to use.
  */
-static inline unsigned int mkprio(unsigned int cntrl, const struct sock *sk)
+static inline unsigned int
+mkprio(unsigned int cntrl, const struct socket *so)
 {
 	return cntrl;
 }
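
mkprio() is a pass-through for now; per the comment, the encoding it will
eventually produce puts the control-queue flag in bit 0 and the queue set in
bits 1..3.  A hypothetical sketch of that encoding:

	/* qs is the connection's queue set index (assumption) */
	return ((cntrl & 1) | ((qs & 0x7) << 1));
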
@@ -165,8 +168,8 @@
 /*
 * Populate a TID_RELEASE WR.  The skb must already be properly sized.
  */
-static inline void mk_tid_release(struct sk_buff *skb, const struct sock *sk,
-				  unsigned int tid)
+static inline void
+mk_tid_release(struct mbuf *skb, const struct socket *so, unsigned int tid)
 {
 	struct cpl_tid_release *req;
 
@@ -179,8 +182,8 @@
 /*
  * Insert a socket to the TID table and take an extra reference.
  */
-static inline void sk_insert_tid(struct tom_data *d, struct sock *sk,
-				 unsigned int tid)
+static inline void
+sk_insert_tid(struct tom_data *d, struct socket *so, unsigned int tid)
 {
 	sock_hold(sk);
 	cxgb3_insert_tid(d->cdev, d->client, sk, tid);
@@ -194,7 +197,8 @@
  *	Returns the index of the value in the MTU table that is closest to but
  *	does not exceed the target MTU.
  */
-static unsigned int find_best_mtu(const struct t3c_data *d, unsigned short mtu)
+static unsigned int
+find_best_mtu(const struct t3c_data *d, unsigned short mtu)
 {
 	int i = 0;
 
@@ -203,7 +207,8 @@
 	return i;
 }
 
-static unsigned int select_mss(struct sock *sk, unsigned int pmtu)
+static unsigned int
+select_mss(struct socket *so, unsigned int pmtu)
 {
 	unsigned int idx;
 	struct tcp_sock *tp = tcp_sk(sk);
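
The search loop of find_best_mtu() is elided by the hunk above; the algorithm
the comment describes is a linear scan of a sorted MTU table.  A sketch,
assuming the t3c_data fields are named mtus[] and nmtus:

	/* pick the largest table entry that does not exceed the target MTU */
	while (i < d->nmtus - 1 && d->mtus[i + 1] <= mtu)
		++i;
	return i;
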
@@ -228,7 +233,8 @@
 /*
  * Returns true if a connection TID is in range and currently unused.
  */
-static int valid_new_tid(const struct tid_info *t, unsigned int tid)
+static int
+valid_new_tid(const struct tid_info *t, unsigned int tid)
 {
 	return tid < t->ntids && !t->tid_tab[tid].ctx;
 }
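
A hedged usage sketch for the check above, as a guard at the top of a CPL
handler (the surrounding handler is hypothetical):

	if (!valid_new_tid(t, tid))
		return (CPL_RET_BUF_DONE);	/* TID out of range or already in use */
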
@@ -248,7 +254,8 @@
  * Resources related to the offload state of a connection (e.g., L2T entries)
  * must have been relinquished prior to calling this.
  */
-static void connection_done(struct sock *sk)
+static void
+connection_done(struct socket *so)
 {
 #if 0
 	printk("connection_done: TID: %u, state: %d, dead %d, refs %d\n",
@@ -284,7 +291,8 @@
  * Determine the receive window scaling factor given a target max
  * receive window.
  */
-static inline int select_rcv_wscale(int space, int wscale_ok, int window_clamp)
+static inline int
+select_rcv_wscale(int space, int wscale_ok, int window_clamp)
 {
 	int wscale = 0;
 
@@ -304,7 +312,8 @@
 /*
  * The next two functions calculate the option 0 value for a socket.
  */
-static inline unsigned int calc_opt0h(struct sock *sk)
+static inline unsigned int
+calc_opt0h(struct socket *so)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
@@ -313,7 +322,8 @@
 	    V_WND_SCALE(RCV_WSCALE(tp)) | V_MSS_IDX(MTU_IDX(tp));
 }
 
-static inline unsigned int calc_opt0l(const struct sock *sk)
+static inline unsigned int
+calc_opt0l(const struct socket *so)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
@@ -321,7 +331,8 @@
 	       V_RCV_BUFSIZ(min(tp->rcv_wnd >> 10, (u32)M_RCV_BUFSIZ));
 }
 
-static inline unsigned int calc_opt2(const struct sock *sk)
+static inline unsigned int
+calc_opt2(const struct socket *so)
 {
 	const struct toedev *dev = TOE_DEV(sk);
 	int flv_valid = TOM_TUNABLE(dev, cong_alg) != -1;
@@ -334,12 +345,13 @@
 /*
  * This function is intended for allocations of small control messages.
 * Such messages go as immediate data and usually the packets are freed
- * immediately.  We maintain a cache of one small sk_buff and use it whenever
+ * immediately.  We maintain a cache of one small mbuf and use it whenever
  * it is available (has a user count of 1).  Otherwise we get a fresh buffer.
  */
-static struct sk_buff *alloc_ctrl_skb(const struct tcp_sock *tp, int len)
+static struct mbuf *
+alloc_ctrl_skb(const struct tcp_sock *tp, int len)
 {
-	struct sk_buff *skb = CTRL_SKB_CACHE(tp);
+	struct mbuf *skb = CTRL_SKB_CACHE(tp);
 
 	if (likely(skb && !skb_shared(skb) && !skb_cloned(skb))) {
 		__skb_trim(skb, 0);
@@ -361,7 +373,8 @@
  * of outstanding WRs, however note that SYN_RCV sockets don't have any such
  * WRs so that the two uses do not overlap.
  */
-static void synq_add(struct sock *parent, struct sock *child)
+static void
+synq_add(struct sock *parent, struct sock *child)
 {
 	struct tcp_sock *p = tcp_sk(parent);
 	struct tcp_sock *c = tcp_sk(child);
@@ -378,7 +391,8 @@
 	c->fastpath_skb_hint = (void *)parent;
 }
 
-static void synq_remove(struct tcp_sock *child)
+static void
+synq_remove(struct tcp_sock *child)
 {
 	struct sock *next = (struct sock *)child->forward_skb_hint;
 	struct sock *prev = (struct sock *)child->fastpath_skb_hint;
@@ -392,7 +406,8 @@
 	reset_synq(child);
 }
 
-static inline void free_wr_skb(struct sk_buff *skb)
+static inline void
+free_wr_skb(struct mbuf *skb)
 {
 #if defined(CONFIG_T3_ZCOPY_SENDMSG) || defined(CONFIG_T3_ZCOPY_SENDMSG_MODULE)
 	if (skb->data[0] == FW_WROPCODE_OFLD_TX_DATA)
@@ -401,20 +416,22 @@
 	kfree_skb(skb);
 }
 
-static void purge_wr_queue(struct tcp_sock *tp)
+static void
+purge_wr_queue(struct tcp_sock *tp)
 {
-	struct sk_buff *skb;
+	struct mbuf *skb;
 	while ((skb = dequeue_wr(tp)) != NULL)
 		free_wr_skb(skb);
 }
 
 #define wr_queue_walk(tp, skb) \
-	for (skb = peek_wr(tp); skb; skb = (struct sk_buff *)skb->input_dev)
+	for (skb = peek_wr(tp); skb; skb = (struct mbuf *)skb->input_dev)
 
 /*
- * Returns true if an sk_buff carries urgent data.
+ * Returns true if an mbuf carries urgent data.
  */
-static inline int skb_urgent(struct sk_buff *skb)
+static inline int
+skb_urgent(struct mbuf *skb)
 {
 	return (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_URG) != 0;
 }
@@ -422,13 +439,14 @@
 /*
  * Generic ARP failure handler that discards the buffer.
  */
-static void arp_failure_discard(struct t3cdev *cdev, struct sk_buff *skb)
+static void
+arp_failure_discard(struct t3cdev *cdev, struct mbuf *m)
 {
-	kfree_skb(skb);
+	m_freem(m);
 }
 
-static inline void make_tx_data_wr(struct sock *sk, struct sk_buff *skb,
-				   int len)
+static inline void
+make_tx_data_wr(struct socket *so, struct mbuf *m, int len)
 {
 	struct tx_data_wr *req;
 	struct tcp_sock *tp = tcp_sk(sk);
@@ -451,7 +469,7 @@
 		req->flags |= htonl(F_TX_INIT | V_TX_CPU_IDX(qset(tp)));
 		/*
		 * The send buffer size is in 32KB units.  In addition Linux doubles
-		 * what the user requested to account for header and sk_buff
+		 * what the user requested to account for header and mbuf
 		 * overhead.  We care about pure payload here so divide by an
 		 * extra 2 to get the user's requested value.
 		 */
@@ -468,11 +486,12 @@
  * socket lock held.  Returns the amount of send buffer space that was freed
  * as a result of sending queued data to the TOE.
  */
-int t3_push_frames(struct sock *sk, int req_completion)
+int
+t3_push_frames(struct socket *so, int req_completion)
 {
 	int total_size = 0;
 	struct tcp_sock *tp = tcp_sk(sk);
-	struct sk_buff *skb;
+	struct mbuf *m;
 	struct t3cdev *cdev;
 	struct tom_data *d;
 
@@ -551,14 +570,15 @@
 
 static inline void free_atid(struct t3cdev *cdev, unsigned int tid)
 {
-	struct sock *sk = cxgb3_free_atid(cdev, tid);
+	struct socket *so = cxgb3_free_atid(cdev, tid);
 	if (sk)
 		sock_put(sk);
 }
 /*
  * Release resources held by an offload connection (TID, L2T entry, etc.)
  */
-static void t3_release_offload_resources(struct sock *sk)
+static void
+t3_release_offload_resources(struct socket *so)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct toedev *tdev = TOE_DEV(sk);
@@ -610,16 +630,18 @@
  * closed connection.  Most messages are illegal at that point except
  * ABORT_RPL_RSS and GET_TCB_RPL sent by DDP.
  */
-static int bad_backlog_msg(unsigned int opcode)
+static int
+bad_backlog_msg(unsigned int opcode)
 {
 	return opcode != CPL_ABORT_RPL_RSS && opcode != CPL_GET_TCB_RPL;
 }
 
 /*
- * Called for each sk_buff in a socket's receive backlog during
+ * Called for each mbuf in a socket's receive backlog during
  * backlog processing.
  */
-static int t3_backlog_rcv(struct sock *sk, struct sk_buff *skb)
+static int
+t3_backlog_rcv(struct socket *so, struct mbuf *skb)
 {
 #if VALIDATE_TID
 	unsigned int opcode = ntohl(skb->csum) >> 24;
@@ -647,7 +669,8 @@
  * operations do not contain the offload backlog handler, we install that
  * directly to the socket.
  */
-static inline void install_offload_ops(struct sock *sk)
+static inline void
+install_offload_ops(struct socket *so)
 {
 	sk->sk_prot = &t3_tcp_prot;
 	sk->sk_backlog_rcv = t3_backlog_rcv;
@@ -665,10 +688,11 @@
 }
 
 #if DEBUG_WR
-static void dump_wrs(struct sock *sk)
+static void
+dump_wrs(struct socket *so)
 {
 	u64 *d;
-	struct sk_buff *p;
+	struct mbuf *p;
 	struct tcp_sock *tp = tcp_sk(sk);
 
 	printk("TID %u info:\n", TID(tp));
@@ -689,17 +713,19 @@
 	}
 }
 
-static int count_pending_wrs(const struct tcp_sock *tp)
+static int
+count_pending_wrs(const struct tcp_sock *tp)
 {
 	int n = 0;
-	const struct sk_buff *p;
+	const struct mbuf *p;
 
 	wr_queue_walk(tp, p)
 		n += p->csum;
 	return n;
 }
 
-static void check_wr_invariants(const struct tcp_sock *tp)
+static void
+check_wr_invariants(const struct tcp_sock *tp)
 {
 	int pending = count_pending_wrs(tp);
 
@@ -710,7 +736,8 @@
 }
 #endif
 
-static void t3_idiag_get_info(struct sock *sk, u32 ext, struct sk_buff *skb)
+static void
+t3_idiag_get_info(struct socket *so, u32 ext, struct mbuf *skb)
 {
 #if DEBUG_WR
 	if (ext & (1 << (INET_DIAG_MEMINFO - 1))) {
@@ -746,7 +773,8 @@
 	T3_CONG_OPS("newreno"),     T3_CONG_OPS("highspeed")
 };
 
-static void mk_act_open_req(struct sock *sk, struct sk_buff *skb,
+static void
+mk_act_open_req(struct socket *so, struct mbuf *skb,
 			    unsigned int atid, const struct l2t_entry *e)
 {
 	struct cpl_act_open_req *req;
@@ -769,7 +797,8 @@
 /*
  * Convert an ACT_OPEN_RPL status to a Linux errno.
  */
-static int act_open_rpl_status_to_errno(int status)
+static int
+act_open_rpl_status_to_errno(int status)
 {
 	switch (status) {
 	case CPL_ERR_CONN_RESET:
@@ -788,9 +817,10 @@
 	}
 }
 
-static void act_open_req_arp_failure(struct t3cdev *dev, struct sk_buff *skb);
+static void act_open_req_arp_failure(struct t3cdev *dev, struct mbuf *skb);
 
-static void fail_act_open(struct sock *sk, int errno)
+static void
+fail_act_open(struct socket *so, int errno)
 {
 	sk->sk_err = errno;
 	sk->sk_error_report(sk);
@@ -799,10 +829,11 @@
 	TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
 }
 
-static void act_open_retry_timer(unsigned long data)
+static void
+act_open_retry_timer(unsigned long data)
 {
-	struct sk_buff *skb;
-	struct sock *sk = (struct sock *)data;
+	struct mbuf *skb;
+	struct socket *so = (struct socket *)data;
 	struct inet_connection_sock *icsk = inet_csk(sk);
 
 	bh_lock_sock(sk);
@@ -828,7 +859,8 @@
 /*
  * Handle active open failures.
  */
-static void active_open_failed(struct sock *sk, struct sk_buff *skb)
+static void
+active_open_failed(struct socket *so, struct mbuf *m)
 {
 	struct cpl_act_open_rpl *rpl = cplhdr(skb);
 	struct inet_connection_sock *icsk = inet_csk(sk);
@@ -846,7 +878,8 @@
 /*
  * Return whether a failed active open has allocated a TID
  */
-static inline int act_open_has_tid(int status)
+static inline int
+act_open_has_tid(int status)
 {
 	return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST &&
 	       status != CPL_ERR_ARP_MISS;
@@ -855,12 +888,13 @@
 /*
  * Process an ACT_OPEN_RPL CPL message.
  */
-static int do_act_open_rpl(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
+static int
+do_act_open_rpl(struct t3cdev *cdev, struct mbuf *m, void *ctx)
 {
-	struct sock *sk = (struct sock *)ctx;
-	struct cpl_act_open_rpl *rpl = cplhdr(skb);
+	struct socket *so = (struct socket *)ctx;
+	struct cpl_act_open_rpl *rpl = cplhdr(m);
 
-	VALIDATE_SOCK(sk);
+	VALIDATE_SOCK(so);
 
 	if (cdev->type != T3A && act_open_has_tid(rpl->status))
 		cxgb3_queue_tid_release(cdev, GET_TID(rpl));
@@ -877,9 +911,9 @@
  * check SOCK_DEAD or sk->sk_sock.  Or maybe generate the error here but don't
  * free the atid.  Hmm.
  */
-static void act_open_req_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
+static void act_open_req_arp_failure(struct t3cdev *dev, struct mbuf *skb)
 {
-	struct sock *sk = skb->sk;
+	struct socket *so = skb->sk;
 
 	sock_hold(sk);
 	bh_lock_sock(sk);
@@ -890,8 +924,8 @@
 		} else {
 			/*
 			 * Smart solution: Synthesize an ACTIVE_OPEN_RPL in the
-			 * existing sk_buff and queue it to the backlog.  We
-			 * are certain the sk_buff is not shared.  We also
+			 * existing mbuf and queue it to the backlog.  We
+			 * are certain the mbuf is not shared.  We also
 			 * don't bother trimming the buffer.
 			 */
 			struct cpl_act_open_rpl *rpl = cplhdr(skb);
@@ -916,7 +950,7 @@
 /*
  * Determine the receive window size for a socket.
  */
-static unsigned int select_rcv_wnd(struct sock *sk)
+static unsigned int select_rcv_wnd(struct socket *so)
 {
 	unsigned int wnd = tcp_full_space(sk);
 
@@ -931,7 +965,7 @@
 }
 
 #if defined(TCP_CONGESTION_CONTROL)
-static void pivot_ca_ops(struct sock *sk, int cong)
+static void pivot_ca_ops(struct socket *so, int cong)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 
@@ -948,7 +982,7 @@
  * Assign offload parameters to some socket fields.  This code is used by
  * both active and passive opens.
  */
-static inline void init_offload_sk(struct sock *sk, struct toedev *dev,
+static inline void init_offload_sk(struct socket *so, struct toedev *dev,
 				   unsigned int tid, struct l2t_entry *e,
 				   struct dst_entry *dst)
 {
@@ -980,10 +1014,10 @@
 /*
  * Send an active open request.
  */
-int t3_connect(struct toedev *tdev, struct sock *sk,
+int t3_connect(struct toedev *tdev, struct socket *so,
 	       struct net_device *egress_dev)
 {
-	struct sk_buff *skb;
+	struct mbuf *skb;
 	struct l2t_entry *e;
 	struct tom_data *d = TOM_DATA(tdev);
 	struct tcp_sock *tp = tcp_sk(sk);
@@ -1029,7 +1063,7 @@
  * Handle an ARP failure for a CPL_ABORT_REQ.  Change it into a no RST variant
  * and send it along.
  */
-static void abort_arp_failure(struct t3cdev *cdev, struct sk_buff *skb)
+static void abort_arp_failure(struct t3cdev *cdev, struct mbuf *skb)
 {
 	struct cpl_abort_req *req = cplhdr(skb);
 
@@ -1043,7 +1077,7 @@
  * not try to send a message after the connection has closed.  Returns 1 if
  * an ABORT_REQ wasn't generated after all, 0 otherwise.
  */
-int t3_send_reset(struct sock *sk, int mode, struct sk_buff *skb)
+int t3_send_reset(struct socket *so, int mode, struct mbuf *skb)
 {
 	struct cpl_abort_req *req;
 	struct tcp_sock *tp = tcp_sk(sk);
@@ -1093,7 +1127,7 @@
  */
 static void reset_listen_child(struct sock *child)
 {
-	struct sk_buff *skb = alloc_skb_nofail(sizeof(struct cpl_abort_req));
+	struct mbuf *skb = alloc_skb_nofail(sizeof(struct cpl_abort_req));
 
 	sock_hold(child);      // need to survive past inet_csk_destroy_sock()
 	local_bh_disable();
@@ -1116,25 +1150,25 @@
  * reason.  These sockets are terminated through a work request from process
  * context.
  */
-static struct sock *reap_list;
-static spinlock_t reap_list_lock = SPIN_LOCK_UNLOCKED;
+static struct socket *reap_list;
+static struct mtx reap_list_lock;
 
 /*
  * Process the reap list.
  */
 DECLARE_TASK_FUNC(process_reap_list, task_param)
 {
-	spin_lock_bh(&reap_list_lock);
+	mtx_lock(&reap_list_lock);
 	while (reap_list) {
-		struct sock *sk = reap_list;
+		struct socket *so = reap_list;
 
 		reap_list = sk->sk_user_data;
 		sk->sk_user_data = NULL;
-		spin_unlock_bh(&reap_list_lock);
+		mtx_unlock(&reap_list_lock);
 		reset_listen_child(sk);
-		spin_lock_bh(&reap_list_lock);
+		mtx_lock(&reap_list_lock);
 	}
-	spin_unlock_bh(&reap_list_lock);
+	mtx_unlock(&reap_list_lock);
 }
 
 static T3_DECLARE_WORK(reap_task, process_reap_list, NULL);
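
Unlike Linux's SPIN_LOCK_UNLOCKED static initializer, the new struct mtx must
be initialized at run time; a minimal sketch for module load (the hook name is
hypothetical):

	static void
	t3_reap_list_init(void)
	{
		mtx_init(&reap_list_lock, "t3 reap list", NULL, MTX_DEF);
	}
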
@@ -1145,21 +1179,21 @@
  * from softirq context and any associated open request must have already
  * been freed.
  */
-static void add_to_reap_list(struct sock *sk)
+static void add_to_reap_list(struct socket *so)
 {
 	BUG_ON(sk->sk_user_data);
 
 	release_tcp_port(sk); // release the port immediately, it may be reused
 
-	spin_lock_bh(&reap_list_lock);
+	mtx_lock(&reap_list_lock);
 	sk->sk_user_data = reap_list;
 	reap_list = sk;
 	if (!sk->sk_user_data)
 		schedule_work(&reap_task);
-	spin_unlock_bh(&reap_list_lock);
+	mtx_unlock(&reap_list_lock);
 }
 
-static void __set_tcb_field(struct sock *sk, struct sk_buff *skb, u16 word,
+static void __set_tcb_field(struct socket *so, struct mbuf *skb, u16 word,
 			    u64 mask, u64 val, int no_reply)
 {
 	struct cpl_set_tcb_field *req;
@@ -1178,9 +1212,9 @@
 	send_or_defer(sk, tp, skb, 0);
 }
 
-void t3_set_tcb_field(struct sock *sk, u16 word, u64 mask, u64 val)
+void t3_set_tcb_field(struct socket *so, u16 word, u64 mask, u64 val)
 {
-	struct sk_buff *skb;
+	struct mbuf *skb;
 
 	if (sk->sk_state == TCP_CLOSE || sock_flag(sk, ABORT_SHUTDOWN))
 		return;
@@ -1192,7 +1226,7 @@
 /*
  * Set one of the t_flags bits in the TCB.
  */
-static void set_tcb_tflag(struct sock *sk, unsigned int bit_pos, int val)
+static void set_tcb_tflag(struct socket *so, unsigned int bit_pos, int val)
 {
 	t3_set_tcb_field(sk, W_TCB_T_FLAGS1, 1ULL << bit_pos, val << bit_pos);
 }
@@ -1200,7 +1234,7 @@
 /*
  * Send a SET_TCB_FIELD CPL message to change a connection's Nagle setting.
  */
-void t3_set_nagle(struct sock *sk)
+void t3_set_nagle(struct socket *so)
 {
 	set_tcb_tflag(sk, S_TF_NAGLE, !(tcp_sk(sk)->nonagle & TCP_NAGLE_OFF));
 }
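
The word/mask/value interface above can flip any TCB bit the same way; a
hedged usage sketch mirroring set_tcb_tflag(), using constants that appear
elsewhere in this file:

	/* sketch: enable receive coalescing on a connection */
	t3_set_tcb_field(so, W_TCB_T_FLAGS1,
	    1ULL << S_TF_RCV_COALESCE_ENABLE,
	    1ULL << S_TF_RCV_COALESCE_ENABLE);
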
@@ -1208,12 +1242,14 @@
 /*
  * Send a SET_TCB_FIELD CPL message to change a connection's keepalive setting.
  */
-void t3_set_keepalive(struct sock *sk, int on_off)
+void
+t3_set_keepalive(struct socket *so, int on_off)
 {
-	set_tcb_tflag(sk, S_TF_KEEPALIVE, on_off);
+	set_tcb_tflag(so, S_TF_KEEPALIVE, on_off);
 }
 
-void t3_set_rcv_coalesce_enable(struct sock *sk, int on_off)
+void
+t3_set_rcv_coalesce_enable(struct socket *so, int on_off)
 {
 	set_tcb_tflag(sk, S_TF_RCV_COALESCE_ENABLE, on_off);
 }
@@ -1221,10 +1257,11 @@
 /*
  * Send a SET_TCB_FIELD CPL message to change a connection's TOS setting.
  */
-void t3_set_tos(struct sock *sk)
+void
+t3_set_tos(struct socket *so)
 {
-	t3_set_tcb_field(sk, W_TCB_TOS, V_TCB_TOS(M_TCB_TOS),
-			 V_TCB_TOS(SK_TOS(sk)));
+	t3_set_tcb_field(so, W_TCB_TOS, V_TCB_TOS(M_TCB_TOS),
+			 V_TCB_TOS(SK_TOS(so)));
 }
 
 /*
@@ -1243,7 +1280,8 @@
      ((V_TCB_RX_DDP_BUF0_OFFSET((u64)1) | V_TCB_RX_DDP_BUF0_LEN((u64)2)) <<\
       32))
 
-void t3_enable_ddp(struct sock *sk, int on)
+void
+t3_enable_ddp(struct socket *so, int on)
 {
 	if (on)
 		t3_set_tcb_field(sk, W_TCB_RX_DDP_FLAGS, V_TF_DDP_OFF(1),
@@ -1256,14 +1294,16 @@
 				 TP_DDP_TIMER_WORKAROUND_VAL);
 }
 
-void t3_set_ddp_tag(struct sock *sk, int buf_idx, unsigned int tag_color)
+void
+t3_set_ddp_tag(struct socket *so, int buf_idx, unsigned int tag_color)
 {
-	t3_set_tcb_field(sk, W_TCB_RX_DDP_BUF0_TAG + buf_idx,
+	t3_set_tcb_field(so, W_TCB_RX_DDP_BUF0_TAG + buf_idx,
 			 V_TCB_RX_DDP_BUF0_TAG(M_TCB_RX_DDP_BUF0_TAG),
 			 tag_color);
 }
 
-void t3_set_ddp_buf(struct sock *sk, int buf_idx, unsigned int offset,
+void
+t3_set_ddp_buf(struct socket *so, int buf_idx, unsigned int offset,
 		    unsigned int len)
 {
 	if (buf_idx == 0)
@@ -1280,7 +1320,8 @@
 			 V_TCB_RX_DDP_BUF1_LEN(((u64)len) << 32));
 }
 
-int t3_set_cong_control(struct sock *sk, const char *name)
+int
+t3_set_cong_control(struct socket *so, const char *name)
 {
 	int cong_algo;
 
@@ -1293,11 +1334,12 @@
 	return 0;
 }
 
-int t3_get_tcb(struct sock *sk)
+int
+t3_get_tcb(struct socket *so)
 {
 	struct cpl_get_tcb *req;
 	struct tcp_sock *tp = tcp_sk(sk);
-	struct sk_buff *skb = alloc_skb(sizeof(*req), gfp_any());
+	struct mbuf *skb = alloc_skb(sizeof(*req), gfp_any());
 
 	if (!skb)
 		return -ENOMEM;
@@ -1318,12 +1360,13 @@
 /*
  * Send RX credits through an RX_DATA_ACK CPL message.  If nofail is 0 we are
  * permitted to return without sending the message in case we cannot allocate
- * an sk_buff.  Returns the number of credits sent.
+ * an mbuf.  Returns the number of credits sent.
  */
-u32 t3_send_rx_credits(struct sock *sk, u32 credits, u32 dack, int nofail)
+u32
+t3_send_rx_credits(struct socket *so, u32 credits, u32 dack, int nofail)
 {
 	struct cpl_rx_data_ack *req;
-	struct sk_buff *skb;
+	struct mbuf *skb;
 
 	/* Forcing nofail until we resolve how to properly keep the TCP
 	 * window from closing on us.
@@ -1350,9 +1393,10 @@
  * This is only used in DDP mode, so we take the opportunity to also set the
  * DACK mode and flush any Rx credits.
  */
-void t3_send_rx_modulate(struct sock *sk)
+void
+t3_send_rx_modulate(struct socket *so)
 {
-	struct sk_buff *skb;
+	struct mbuf *skb;
 	struct cpl_rx_data_ack *req;
 	struct tcp_sock *tp = tcp_sk(sk);
 
@@ -1372,7 +1416,8 @@
 /*
  * Handle receipt of an urgent pointer.
  */
-static void handle_urg_ptr(struct sock *sk, u32 urg_seq)
+static void
+handle_urg_ptr(struct socket *so, u32 urg_seq)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
@@ -1384,7 +1429,7 @@
 	sk_send_sigurg(sk);
 	if (tp->urg_seq == tp->copied_seq && tp->urg_data &&
 	    !sock_flag(sk, SOCK_URGINLINE) && tp->copied_seq != tp->rcv_nxt) {
-		struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
+		struct mbuf *skb = skb_peek(&sk->sk_receive_queue);
 
 		tp->copied_seq++;
 		if (skb && tp->copied_seq - TCP_SKB_CB(skb)->seq >= skb->len)
@@ -1397,7 +1442,8 @@
 /*
  * Returns true if a socket cannot accept new Rx data.
  */
-static inline int sk_no_receive(const struct sock *sk)
+static inline int
+sk_no_receive(const struct socket *so)
 {
 	return (sk->sk_shutdown & RCV_SHUTDOWN);
 }
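
The FreeBSD analogue of the RCV_SHUTDOWN test would consult the receive
sockbuf; a sketch under that assumption (the so_ name is hypothetical):

	static inline int
	so_no_receive(const struct socket *so)
	{
		return (so->so_rcv.sb_state & SBS_CANTRCVMORE);
	}
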
@@ -1405,12 +1451,13 @@
 /*
  * Process an urgent data notification.
  */
-static void rx_urg_notify(struct sock *sk, struct sk_buff *skb)
+static void
+rx_urg_notify(struct socket *so, struct mbuf *m)
 {
-	struct cpl_rx_urg_notify *hdr = cplhdr(skb);
+	struct cpl_rx_urg_notify *hdr = cplhdr(m);
 
 	if (!sk_no_receive(sk))
-		handle_urg_ptr(sk, ntohl(hdr->seq));
+		handle_urg_ptr(so, ntohl(hdr->seq));
 
 	__kfree_skb(skb);
 }
@@ -1418,13 +1465,14 @@
 /*
  * Handler for RX_URG_NOTIFY CPL messages.
  */
-static int do_rx_urg_notify(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
+static int
+do_rx_urg_notify(struct toedev *cdev, struct mbuf *m, void *ctx)
 {
-	struct sock *sk = (struct sock *)ctx;
+	struct socket *so = (struct socket *)ctx;
 
-	VALIDATE_SOCK(sk);
+	VALIDATE_SOCK(so);
 
-	process_cpl_msg(rx_urg_notify, sk, skb);
+	process_cpl_msg(rx_urg_notify, so, m);
 	return 0;
 }
 
@@ -1433,11 +1481,12 @@
  * counter.  The supplied skb is used to generate the ABORT_REQ message if
  * possible.  Must be called with softirqs disabled.
  */
-static inline void abort_conn(struct sock *sk, struct sk_buff *skb, int mib)
+static inline void
+abort_conn(struct socket *so, struct mbuf *m, int mib)
 {
-	struct sk_buff *abort_skb;
+	struct mbuf *abort_m;
 
-	abort_skb = __get_cpl_reply_skb(skb, sizeof(struct cpl_abort_req),
+	abort_m = __get_cpl_reply_skb(skb, sizeof(struct cpl_abort_req),
 					GFP_ATOMIC);
 	if (abort_skb) {
 		NET_INC_STATS_BH(mib);
@@ -1449,7 +1498,8 @@
  * Returns true if we need to explicitly request RST when we receive new data
  * on an RX-closed connection.
  */
-static inline int need_rst_on_excess_rx(const struct sock *sk)
+static inline int
+need_rst_on_excess_rx(const struct socket *so)
 {
 	return 1;
 }
@@ -1458,7 +1508,8 @@
  * Handles Rx data that arrives in a state where the socket isn't accepting
  * new data.
  */
-static void handle_excess_rx(struct sock *sk, struct sk_buff *skb)
+static void
+handle_excess_rx(struct socket *so, struct mbuf *m)
 {
 	if (need_rst_on_excess_rx(sk) && !sock_flag(sk, ABORT_SHUTDOWN))
 		abort_conn(sk, skb, LINUX_MIB_TCPABORTONDATA);
@@ -1470,7 +1521,8 @@
  * Process a get_tcb_rpl as a DDP completion (similar to RX_DDP_COMPLETE)
  * by getting the DDP offset from the TCB.
  */
-static void tcb_rpl_as_ddp_complete(struct sock *sk, struct sk_buff *skb)
+static void
+tcb_rpl_as_ddp_complete(struct socket *so, struct mbuf *m)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct ddp_state *q = DDP_STATE(tp);
@@ -1632,24 +1684,26 @@
  * Process a CPL_GET_TCB_RPL.  These can also be generated by the DDP code,
  * in that case they are similar to DDP completions.
  */
-static int do_get_tcb_rpl(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
+static int
+do_get_tcb_rpl(struct toedev *cdev, struct mbuf *m, void *ctx)
 {
-	struct sock *sk = (struct sock *)ctx;
+	struct socket *so = (struct socket *)ctx;
 
 	/* OK if socket doesn't exist */
 	if (!sk)
 		return CPL_RET_BUF_DONE;
 
-	process_cpl_msg(tcb_rpl_as_ddp_complete, sk, skb);
+	process_cpl_msg(tcb_rpl_as_ddp_complete, so, m);
 	return 0;
 }
 
-static void handle_ddp_data(struct sock *sk, struct sk_buff *skb)
+static void
+handle_ddp_data(struct socket *so, struct mbuf *m)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct ddp_state *q;
 	struct ddp_buf_state *bsp;
-	struct cpl_rx_data *hdr = cplhdr(skb);
+	struct cpl_rx_data *hdr = cplhdr(m);
 	unsigned int rcv_nxt = ntohl(hdr->seq);
 
 	if (tp->rcv_nxt == rcv_nxt)
@@ -1688,7 +1742,8 @@
 /*
  * Process new data received for a connection.
  */
-static void new_rx_data(struct sock *sk, struct sk_buff *skb)
+static void
+new_rx_data(struct socket *so, struct mbuf *m)
 {
 	struct cpl_rx_data *hdr = cplhdr(skb);
 	struct tcp_sock *tp = tcp_sk(sk);
@@ -1748,9 +1803,10 @@
 /*
  * Handler for RX_DATA CPL messages.
  */
-static int do_rx_data(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
+static int
+do_rx_data(struct t3cdev *cdev, struct mbuf *skb, void *ctx)
 {
-	struct sock *sk = (struct sock *)ctx;
+	struct socket *so = (struct socket *)ctx;
 
 	VALIDATE_SOCK(sk);
 
@@ -1761,7 +1817,8 @@
 	return 0;
 }
 
-static void new_rx_data_ddp(struct sock *sk, struct sk_buff *skb)
+static void
+new_rx_data_ddp(struct socket *so, struct mbuf *skb)
 {
 	struct tcp_sock *tp;
 	struct ddp_state *q;
@@ -1855,10 +1912,11 @@
 /*
  * Handler for RX_DATA_DDP CPL messages.
  */
-static int do_rx_data_ddp(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
+static int
+do_rx_data_ddp(struct t3cdev *cdev, struct mbuf *m, void *ctx)
 {
-	struct sock *sk = ctx;

>>> TRUNCATED FOR MAIL (1000 lines) <<<


