Date:      Fri, 27 Jan 2017 11:19:07 +0000 (UTC)
From:      Hans Petter Selasky <hselasky@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r312880 - in head/sys/dev/mlx5: . mlx5_core
Message-ID:  <201701271119.v0RBJ7P9051168@repo.freebsd.org>

Author: hselasky
Date: Fri Jan 27 11:19:06 2017
New Revision: 312880
URL: https://svnweb.freebsd.org/changeset/base/312880

Log:
  Wait for all VF pages to be reclaimed before freeing the EQ pages.
  
  MFC after:		1 week
  Sponsored by:		Mellanox Technologies
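
The mlx5_wait_for_reclaim_vfs_pages() function added in the mlx5_pagealloc.c
hunk below polls the number of pages still held by the VFs (fw_pages minus
the PF's own pages_per_func[0] count) and pushes its deadline out by 100 ms
whenever that number changes, so the wait only times out once the firmware
has stopped making progress. The following standalone sketch shows the same
poll-and-extend pattern in plain userland C; the names used here
(poll_outstanding(), wait_for_reclaim(), outstanding_pages) are invented for
illustration and this is not the driver code.

/*
 * Standalone sketch of the poll-and-extend wait pattern; all names
 * below are invented for illustration and are not part of the driver.
 */
#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

/* Stand-in for "fw_pages - pages_per_func[0]": pages still held by VFs. */
static int64_t outstanding_pages = 300;

static int64_t
poll_outstanding(void)
{
	/* Pretend the firmware returns 100 pages per poll. */
	if (outstanding_pages > 0)
		outstanding_pages -= 100;
	return (outstanding_pages);
}

static int64_t
now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ((int64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000);
}

/* Returns the number of pages still unreclaimed when the wait gave up. */
static int64_t
wait_for_reclaim(int timeout_ms)
{
	int64_t end = now_ms() + timeout_ms;
	int64_t prev = 0, pages = 0;

	while (now_ms() <= end) {
		pages = poll_outstanding();
		if (pages == 0)
			break;
		/* Progress was made; give the firmware another 100 ms. */
		if (pages != prev)
			end += 100;
		prev = pages;
		usleep(1000);		/* like msleep(1) in the driver */
	}
	return (pages);
}

int
main(void)
{
	int64_t left = wait_for_reclaim(5000);

	printf("pages left unreclaimed: %jd\n", (intmax_t)left);
	return (left != 0);
}

The key design point is that the timeout bounds stalls rather than total
time: as long as the firmware keeps returning pages, the loop keeps waiting.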

Modified:
  head/sys/dev/mlx5/driver.h
  head/sys/dev/mlx5/mlx5_core/mlx5_main.c
  head/sys/dev/mlx5/mlx5_core/mlx5_pagealloc.c

Modified: head/sys/dev/mlx5/driver.h
==============================================================================
--- head/sys/dev/mlx5/driver.h	Fri Jan 27 11:03:58 2017	(r312879)
+++ head/sys/dev/mlx5/driver.h	Fri Jan 27 11:19:06 2017	(r312880)
@@ -43,6 +43,7 @@
 #include <dev/mlx5/doorbell.h>
 
 #define MLX5_QCOUNTER_SETS_NETDEV 64
+#define MLX5_MAX_NUMBER_OF_VFS 128
 
 enum {
 	MLX5_BOARD_ID_LEN = 64,
@@ -521,7 +522,7 @@ struct mlx5_priv {
 	s64			fw_pages;
 	atomic_t		reg_pages;
 	struct list_head	free_list;
-
+	s64			pages_per_func[MLX5_MAX_NUMBER_OF_VFS];
 	struct mlx5_core_health health;
 
 	struct mlx5_srq_table	srq_table;
@@ -850,6 +851,7 @@ void mlx5_core_req_pages_handler(struct 
 				 s32 npages);
 int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
 int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
+s64 mlx5_wait_for_reclaim_vfs_pages(struct mlx5_core_dev *dev);
 void mlx5_register_debugfs(void);
 void mlx5_unregister_debugfs(void);
 int mlx5_eq_init(struct mlx5_core_dev *dev);

Modified: head/sys/dev/mlx5/mlx5_core/mlx5_main.c
==============================================================================
--- head/sys/dev/mlx5/mlx5_core/mlx5_main.c	Fri Jan 27 11:03:58 2017	(r312879)
+++ head/sys/dev/mlx5/mlx5_core/mlx5_main.c	Fri Jan 27 11:19:06 2017	(r312880)
@@ -853,6 +853,7 @@ static void mlx5_dev_cleanup(struct mlx5
 	mlx5_cleanup_qp_table(dev);
 	mlx5_cleanup_cq_table(dev);
 	unmap_bf_area(dev);
+	mlx5_wait_for_reclaim_vfs_pages(dev);
 	free_comp_eqs(dev);
 	mlx5_stop_eqs(dev);
 	mlx5_free_uuars(dev, &priv->uuari);

Modified: head/sys/dev/mlx5/mlx5_core/mlx5_pagealloc.c
==============================================================================
--- head/sys/dev/mlx5/mlx5_core/mlx5_pagealloc.c	Fri Jan 27 11:03:58 2017	(r312879)
+++ head/sys/dev/mlx5/mlx5_core/mlx5_pagealloc.c	Fri Jan 27 11:19:06 2017	(r312880)
@@ -27,6 +27,7 @@
 
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/delay.h>
 #include <dev/mlx5/driver.h>
 #include "mlx5_core.h"
 
@@ -282,6 +283,7 @@ retry:
 		goto out_alloc;
 	}
 	dev->priv.fw_pages += npages;
+	dev->priv.pages_per_func[func_id] += npages;
 
 	if (out.hdr.status) {
 		err = mlx5_cmd_status_to_err(&out.hdr);
@@ -355,7 +357,7 @@ static int reclaim_pages(struct mlx5_cor
 		*nclaimed = num_claimed;
 
 	dev->priv.fw_pages -= num_claimed;
-
+	dev->priv.pages_per_func[func_id] -= num_claimed;
 	for (i = 0; i < num_claimed; i++) {
 		addr = be64_to_cpu(out->pas[i]);
 		free_4k(dev, addr);
@@ -423,6 +425,31 @@ enum {
 	MLX5_BLKS_FOR_RECLAIM_PAGES = 12
 };
 
+s64 mlx5_wait_for_reclaim_vfs_pages(struct mlx5_core_dev *dev)
+{
+	unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
+	s64 prevpages = 0;
+	s64 npages = 0;
+
+	while (!time_after(jiffies, end)) {
+		/* Exclude the PF's own pages; count only pages held by VFs. */
+		npages = dev->priv.fw_pages - dev->priv.pages_per_func[0];
+		if (!npages)
+			break;
+
+		/* Progress was made; give the firmware another 100 ms. */
+		if (npages != prevpages)
+			end = end + msecs_to_jiffies(100);
+
+		prevpages = npages;
+		msleep(1);
+	}
+
+	if (npages)
+		mlx5_core_warn(dev, "FW did not return all VF pages; this will cause a memory leak\n");
+
+	return -npages;
+}
+
 static int optimal_reclaimed_pages(void)
 {
 	struct mlx5_cmd_prot_block *block;


