Diffstat (limited to 'mm/memory-tiers.c')
 mm/memory-tiers.c | 175 ++++++++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 168 insertions(+), 7 deletions(-)
diff --git a/mm/memory-tiers.c b/mm/memory-tiers.c
index 37a4f59d95..8d5291add2 100644
--- a/mm/memory-tiers.c
+++ b/mm/memory-tiers.c
@@ -5,6 +5,7 @@
#include <linux/kobject.h>
#include <linux/memory.h>
#include <linux/memory-tiers.h>
+#include <linux/notifier.h>
#include "internal.h"
@@ -36,7 +37,7 @@ struct node_memory_type_map {
static DEFINE_MUTEX(memory_tier_lock);
static LIST_HEAD(memory_tiers);
static struct node_memory_type_map node_memory_types[MAX_NUMNODES];
-static struct memory_dev_type *default_dram_type;
+struct memory_dev_type *default_dram_type;
static struct bus_type memory_tier_subsys = {
.name = "memory_tiering",
@@ -105,6 +106,13 @@ static int top_tier_adistance;
static struct demotion_nodes *node_demotion __read_mostly;
#endif /* CONFIG_MIGRATION */
+static BLOCKING_NOTIFIER_HEAD(mt_adistance_algorithms);
+
+static bool default_dram_perf_error;
+static struct node_hmem_attrs default_dram_perf;
+static int default_dram_perf_ref_nid = NUMA_NO_NODE;
+static const char *default_dram_perf_ref_source;
+
static inline struct memory_tier *to_memory_tier(struct device *device)
{
return container_of(device, struct memory_tier, dev);
@@ -115,7 +123,7 @@ static __always_inline nodemask_t get_memtier_nodemask(struct memory_tier *memti
nodemask_t nodes = NODE_MASK_NONE;
struct memory_dev_type *memtype;
- list_for_each_entry(memtype, &memtier->memory_types, tier_sibiling)
+ list_for_each_entry(memtype, &memtier->memory_types, tier_sibling)
nodes_or(nodes, nodes, memtype->nodes);
return nodes;
@@ -174,7 +182,7 @@ static struct memory_tier *find_create_memory_tier(struct memory_dev_type *memty
* If the memtype is already part of a memory tier,
* just return that.
*/
- if (!list_empty(&memtype->tier_sibiling)) {
+ if (!list_empty(&memtype->tier_sibling)) {
list_for_each_entry(memtier, &memory_tiers, list) {
if (adistance == memtier->adistance_start)
return memtier;
@@ -218,7 +226,7 @@ static struct memory_tier *find_create_memory_tier(struct memory_dev_type *memty
memtier = new_memtier;
link_memtype:
- list_add(&memtype->tier_sibiling, &memtier->memory_types);
+ list_add(&memtype->tier_sibling, &memtier->memory_types);
return memtier;
}
@@ -527,7 +535,7 @@ static bool clear_node_memory_tier(int node)
memtype = node_memory_types[node].memtype;
node_clear(node, memtype->nodes);
if (nodes_empty(memtype->nodes)) {
- list_del_init(&memtype->tier_sibiling);
+ list_del_init(&memtype->tier_sibling);
if (list_empty(&memtier->memory_types))
destroy_memory_tier(memtier);
}
@@ -553,7 +561,7 @@ struct memory_dev_type *alloc_memory_type(int adistance)
return ERR_PTR(-ENOMEM);
memtype->adistance = adistance;
- INIT_LIST_HEAD(&memtype->tier_sibiling);
+ INIT_LIST_HEAD(&memtype->tier_sibling);
memtype->nodes = NODE_MASK_NONE;
kref_init(&memtype->kref);
return memtype;
@@ -578,13 +586,14 @@ EXPORT_SYMBOL_GPL(init_node_memory_type);
void clear_node_memory_type(int node, struct memory_dev_type *memtype)
{
mutex_lock(&memory_tier_lock);
- if (node_memory_types[node].memtype == memtype)
+ if (node_memory_types[node].memtype == memtype || !memtype)
node_memory_types[node].map_count--;
/*
	 * If we have unmapped all the devices attached to this node,
* clear the node memory type.
*/
if (!node_memory_types[node].map_count) {
+ memtype = node_memory_types[node].memtype;
node_memory_types[node].memtype = NULL;
put_memory_type(memtype);
}
@@ -592,6 +601,158 @@ void clear_node_memory_type(int node, struct memory_dev_type *memtype)
}
EXPORT_SYMBOL_GPL(clear_node_memory_type);
+static void dump_hmem_attrs(struct node_hmem_attrs *attrs, const char *prefix)
+{
+ pr_info(
+"%sread_latency: %u, write_latency: %u, read_bandwidth: %u, write_bandwidth: %u\n",
+ prefix, attrs->read_latency, attrs->write_latency,
+ attrs->read_bandwidth, attrs->write_bandwidth);
+}
+
+int mt_set_default_dram_perf(int nid, struct node_hmem_attrs *perf,
+ const char *source)
+{
+ int rc = 0;
+
+ mutex_lock(&memory_tier_lock);
+ if (default_dram_perf_error) {
+ rc = -EIO;
+ goto out;
+ }
+
+ if (perf->read_latency + perf->write_latency == 0 ||
+ perf->read_bandwidth + perf->write_bandwidth == 0) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (default_dram_perf_ref_nid == NUMA_NO_NODE) {
+ default_dram_perf = *perf;
+ default_dram_perf_ref_nid = nid;
+ default_dram_perf_ref_source = kstrdup(source, GFP_KERNEL);
+ goto out;
+ }
+
+ /*
+ * The performance of all default DRAM nodes is expected to be
+	 * the same (that is, to vary by less than 10%). It is used as
+	 * the base for calculating the abstract distance of other
+	 * memory nodes.
+ */
+ if (abs(perf->read_latency - default_dram_perf.read_latency) * 10 >
+ default_dram_perf.read_latency ||
+ abs(perf->write_latency - default_dram_perf.write_latency) * 10 >
+ default_dram_perf.write_latency ||
+ abs(perf->read_bandwidth - default_dram_perf.read_bandwidth) * 10 >
+ default_dram_perf.read_bandwidth ||
+ abs(perf->write_bandwidth - default_dram_perf.write_bandwidth) * 10 >
+ default_dram_perf.write_bandwidth) {
+ pr_info(
+"memory-tiers: the performance of DRAM node %d mismatches that of the reference\n"
+"DRAM node %d.\n", nid, default_dram_perf_ref_nid);
+ pr_info(" performance of reference DRAM node %d:\n",
+ default_dram_perf_ref_nid);
+ dump_hmem_attrs(&default_dram_perf, " ");
+ pr_info(" performance of DRAM node %d:\n", nid);
+ dump_hmem_attrs(perf, " ");
+ pr_info(
+" disable default DRAM node performance based abstract distance algorithm.\n");
+ default_dram_perf_error = true;
+ rc = -EINVAL;
+ }
+
+out:
+ mutex_unlock(&memory_tier_lock);
+ return rc;
+}
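
For illustration, a minimal sketch of a caller reporting its DRAM performance
as the baseline (the node id, the attribute values, and the "example_driver"
source string are hypothetical; only mt_set_default_dram_perf() and struct
node_hmem_attrs come from this patch and its header):

	#include <linux/memory-tiers.h>
	#include <linux/node.h>

	static int example_report_dram_perf(void)
	{
		struct node_hmem_attrs perf = {
			.read_latency = 100,		/* nanoseconds */
			.write_latency = 100,
			.read_bandwidth = 20000,	/* MB/s */
			.write_bandwidth = 20000,
		};

		/* Node 0 is assumed to be a default DRAM node. */
		return mt_set_default_dram_perf(0, &perf, "example_driver");
	}

A second caller that later reported, say, read_latency = 120 against this
baseline would trip the variation check above, because
abs(120 - 100) * 10 = 200 > 100, i.e. more than 10% deviation, and the
default-DRAM-based algorithm would be disabled with -EINVAL.
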
+
+int mt_perf_to_adistance(struct node_hmem_attrs *perf, int *adist)
+{
+ if (default_dram_perf_error)
+ return -EIO;
+
+ if (default_dram_perf_ref_nid == NUMA_NO_NODE)
+ return -ENOENT;
+
+ if (perf->read_latency + perf->write_latency == 0 ||
+ perf->read_bandwidth + perf->write_bandwidth == 0)
+ return -EINVAL;
+
+ mutex_lock(&memory_tier_lock);
+ /*
+ * The abstract distance of a memory node is in direct proportion to
+ * its memory latency (read + write) and inversely proportional to its
+ * memory bandwidth (read + write). The abstract distance, memory
+ * latency, and memory bandwidth of the default DRAM nodes are used as
+ * the base.
+ */
+ *adist = MEMTIER_ADISTANCE_DRAM *
+ (perf->read_latency + perf->write_latency) /
+ (default_dram_perf.read_latency + default_dram_perf.write_latency) *
+ (default_dram_perf.read_bandwidth + default_dram_perf.write_bandwidth) /
+ (perf->read_bandwidth + perf->write_bandwidth);
+ mutex_unlock(&memory_tier_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt_perf_to_adistance);
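
As a hedged usage sketch of the formula above (the values are made up and
assume the 100 ns / 20000 MB/s DRAM baseline from the previous example), a
node with twice the default DRAM latency and half its bandwidth lands at
four times MEMTIER_ADISTANCE_DRAM:

	#include <linux/memory-tiers.h>
	#include <linux/node.h>

	static int example_slow_mem_adistance(int *adist)
	{
		struct node_hmem_attrs perf = {
			.read_latency = 200,		/* 2x DRAM latency */
			.write_latency = 200,
			.read_bandwidth = 10000,	/* 0.5x DRAM bandwidth */
			.write_bandwidth = 10000,
		};

		/*
		 * MEMTIER_ADISTANCE_DRAM * (400 / 200) * (40000 / 20000)
		 * == 4 * MEMTIER_ADISTANCE_DRAM with the assumed baseline.
		 */
		return mt_perf_to_adistance(&perf, adist);
	}
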
+
+/**
+ * register_mt_adistance_algorithm() - Register memory tiering abstract distance algorithm
+ * @nb: The notifier block which describes the algorithm
+ *
+ * Return: 0 on success, errno on error.
+ *
+ * Every memory tiering abstract distance algorithm provider needs to
+ * register the algorithm with register_mt_adistance_algorithm(). To
+ * calculate the abstract distance for a specified memory node, the
+ * notifier function will be called unless some higher-priority
+ * algorithm has already provided a result. The prototype of the
+ * notifier function is as follows:
+ *
+ * int (*algorithm_notifier)(struct notifier_block *nb,
+ * unsigned long nid, void *data);
+ *
+ * Where "nid" specifies the memory node, "data" is the pointer to the
+ * returned abstract distance (that is, "int *adist"). If the
+ * algorithm provides the result, NOTIFY_STOP should be returned.
+ * Otherwise, return_value & %NOTIFY_STOP_MASK == 0 to allow the next
+ * algorithm in the chain to provide the result.
+ */
+int register_mt_adistance_algorithm(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&mt_adistance_algorithms, nb);
+}
+EXPORT_SYMBOL_GPL(register_mt_adistance_algorithm);
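
A minimal sketch of an algorithm provider following the documented prototype
(example_driver_owns_node() and the returned distance are hypothetical
stand-ins for a real driver's policy):

	#include <linux/memory-tiers.h>
	#include <linux/notifier.h>

	/* Hypothetical policy: this driver only manages node 1. */
	static bool example_driver_owns_node(unsigned long nid)
	{
		return nid == 1;
	}

	static int example_adist_notifier(struct notifier_block *nb,
					  unsigned long nid, void *data)
	{
		int *adist = data;

		/* Not our node: let the next algorithm in the chain try. */
		if (!example_driver_owns_node(nid))
			return NOTIFY_OK;

		/* Provide a result and stop the chain. */
		*adist = MEMTIER_ADISTANCE_DRAM * 2;
		return NOTIFY_STOP;
	}

	static struct notifier_block example_adist_nb = {
		.notifier_call = example_adist_notifier,
	};

	/* In driver init: register_mt_adistance_algorithm(&example_adist_nb); */
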
+
+/**
+ * unregister_mt_adistance_algorithm() - Unregister memory tiering abstract distance algorithm
+ * @nb: The notifier block which describes the algorithm
+ *
+ * Return: 0 on success, errno on error.
+ */
+int unregister_mt_adistance_algorithm(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&mt_adistance_algorithms, nb);
+}
+EXPORT_SYMBOL_GPL(unregister_mt_adistance_algorithm);
+
+/**
+ * mt_calc_adistance() - Calculate abstract distance with registered algorithms
+ * @node: the node to calculate abstract distance for
+ * @adist: the returned abstract distance
+ *
+ * Return: if return_value & %NOTIFY_STOP_MASK != 0, then some
+ * abstract distance algorithm has provided a result, which is
+ * returned via @adist. Otherwise, no algorithm could provide a
+ * result and @adist is left unchanged.
+ */
+int mt_calc_adistance(int node, int *adist)
+{
+ return blocking_notifier_call_chain(&mt_adistance_algorithms, node, adist);
+}
+EXPORT_SYMBOL_GPL(mt_calc_adistance);
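
And a hedged sketch of a consumer (the DRAM fallback value and the
pr_debug() message are illustrative only):

	#include <linux/memory-tiers.h>
	#include <linux/notifier.h>
	#include <linux/printk.h>

	static int example_node_adistance(int node)
	{
		/* Kept as-is if no registered algorithm claims the node. */
		int adist = MEMTIER_ADISTANCE_DRAM;

		if (mt_calc_adistance(node, &adist) & NOTIFY_STOP_MASK)
			pr_debug("node %d: adistance %d from a registered algorithm\n",
				 node, adist);

		return adist;
	}
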
+
static int __meminit memtier_hotplug_callback(struct notifier_block *self,
unsigned long action, void *_arg)
{