path: root/ml/Dimension.cc
author     Daniel Baumann <daniel.baumann@progress-linux.org>   2022-11-30 18:47:05 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>   2022-11-30 18:47:05 +0000
commit     97e01009d69b8fbebfebf68f51e3d126d0ed43fc (patch)
tree       02e8b836c3a9d89806f3e67d4a5fe9f52dbb0061 /ml/Dimension.cc
parent     Releasing debian version 1.36.1-1. (diff)
Merging upstream version 1.37.0.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'ml/Dimension.cc')
-rw-r--r--   ml/Dimension.cc   76
1 file changed, 57 insertions(+), 19 deletions(-)
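
The diff that follows collapses the old TrainableDimension/PredictableDimension split into a single Dimension class: trainModel() now preprocesses the samples buffer into feature vectors, trains a KMeans model and publishes it into a mutex-protected Models array, while predict() returns a plain bool anomaly bit, scoring the latest feature vector against every stored model and bailing out via try_lock if the trainer currently holds the mutex. As a rough sketch of that publish-under-lock / score-under-try_lock pattern (all names here, such as Model, score and DimensionSketch, are illustrative assumptions, not netdata identifiers):

    #include <array>
    #include <mutex>
    #include <vector>

    // Illustrative stand-in for a trained model; 'score' and 'Threshold'
    // are assumptions, not the real KMeans interface.
    struct Model {
        double Threshold = 1.0;
        double score(const std::vector<double> &Sample) const {
            double S = 0.0;
            for (double V : Sample)
                S += V * V;
            return S;
        }
    };

    class DimensionSketch {
    public:
        // Training side: build the model outside the lock, publish it under the lock.
        void storeModel(const Model &M) {
            std::lock_guard<std::mutex> Lock(Mutex);
            Models[0] = M;
        }

        // Prediction side: if the trainer holds the mutex, skip this round
        // instead of blocking; otherwise flag an anomaly only when no stored
        // model scores the sample below its threshold.
        bool anomalous(const std::vector<double> &Sample) {
            std::unique_lock<std::mutex> Lock(Mutex, std::defer_lock);
            if (!Lock.try_lock())
                return false;

            for (const auto &M : Models) {
                if (M.score(Sample) < M.Threshold)
                    return false;
            }
            return true;
        }

    private:
        std::mutex Mutex;
        std::array<Model, 1> Models;
    };

The try_lock on the prediction side means a concurrent training pass costs at most one skipped prediction instead of blocking the caller.
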
diff --git a/ml/Dimension.cc b/ml/Dimension.cc
index 0fe07530..bf34abb7 100644
--- a/ml/Dimension.cc
+++ b/ml/Dimension.cc
@@ -6,8 +6,13 @@
using namespace ml;
-std::pair<CalculatedNumber *, size_t>
-TrainableDimension::getCalculatedNumbers() {
+bool Dimension::isActive() const {
+ bool SetObsolete = rrdset_flag_check(RD->rrdset, RRDSET_FLAG_OBSOLETE);
+ bool DimObsolete = rrddim_flag_check(RD, RRDDIM_FLAG_OBSOLETE);
+ return !SetObsolete && !DimObsolete;
+}
+
+std::pair<CalculatedNumber *, size_t> Dimension::getCalculatedNumbers() {
size_t MinN = Cfg.MinTrainSamples;
size_t MaxN = Cfg.MaxTrainSamples;
@@ -68,7 +73,7 @@ TrainableDimension::getCalculatedNumbers() {
return { CNs, TotalValues };
}
-MLResult TrainableDimension::trainModel() {
+MLResult Dimension::trainModel() {
auto P = getCalculatedNumbers();
CalculatedNumber *CNs = P.first;
unsigned N = P.second;
@@ -81,7 +86,15 @@ MLResult TrainableDimension::trainModel() {
SamplesBuffer SB = SamplesBuffer(CNs, N, 1, Cfg.DiffN, Cfg.SmoothN, Cfg.LagN,
SamplingRatio, Cfg.RandomNums);
- KM.train(SB, Cfg.MaxKMeansIters);
+ std::vector<DSample> Samples = SB.preprocess();
+
+ KMeans KM;
+ KM.train(Samples, Cfg.MaxKMeansIters);
+
+ {
+ std::lock_guard<std::mutex> Lock(Mutex);
+ Models[0] = KM;
+ }
Trained = true;
ConstantModel = true;
@@ -90,16 +103,25 @@ MLResult TrainableDimension::trainModel() {
return MLResult::Success;
}
-void PredictableDimension::addValue(CalculatedNumber Value, bool Exists) {
+bool Dimension::shouldTrain(const TimePoint &TP) const {
+ if (ConstantModel)
+ return false;
+
+ return (LastTrainedAt + Seconds(Cfg.TrainEvery * updateEvery())) < TP;
+}
+
+bool Dimension::predict(CalculatedNumber Value, bool Exists) {
if (!Exists) {
CNs.clear();
- return;
+ AnomalyBit = false;
+ return false;
}
unsigned N = Cfg.DiffN + Cfg.SmoothN + Cfg.LagN;
if (CNs.size() < N) {
CNs.push_back(Value);
- return;
+ AnomalyBit = false;
+ return false;
}
std::rotate(std::begin(CNs), std::begin(CNs) + 1, std::end(CNs));
@@ -108,28 +130,44 @@ void PredictableDimension::addValue(CalculatedNumber Value, bool Exists) {
ConstantModel = false;
CNs[N - 1] = Value;
-}
-std::pair<MLResult, bool> PredictableDimension::predict() {
- unsigned N = Cfg.DiffN + Cfg.SmoothN + Cfg.LagN;
- if (CNs.size() != N) {
+ if (!isTrained() || ConstantModel) {
AnomalyBit = false;
- return { MLResult::MissingData, AnomalyBit };
+ return false;
}
CalculatedNumber *TmpCNs = new CalculatedNumber[N * (Cfg.LagN + 1)]();
std::memcpy(TmpCNs, CNs.data(), N * sizeof(CalculatedNumber));
-
- SamplesBuffer SB = SamplesBuffer(TmpCNs, N, 1, Cfg.DiffN, Cfg.SmoothN, Cfg.LagN,
+ SamplesBuffer SB = SamplesBuffer(TmpCNs, N, 1,
+ Cfg.DiffN, Cfg.SmoothN, Cfg.LagN,
1.0, Cfg.RandomNums);
- AnomalyScore = computeAnomalyScore(SB);
+ const DSample Sample = SB.preprocess().back();
delete[] TmpCNs;
- if (AnomalyScore == std::numeric_limits<CalculatedNumber>::quiet_NaN()) {
+ std::unique_lock<std::mutex> Lock(Mutex, std::defer_lock);
+ if (!Lock.try_lock()) {
AnomalyBit = false;
- return { MLResult::NaN, AnomalyBit };
+ return false;
}
- AnomalyBit = AnomalyScore >= (100 * Cfg.DimensionAnomalyScoreThreshold);
- return { MLResult::Success, AnomalyBit };
+ for (const auto &KM : Models) {
+ double AnomalyScore = KM.anomalyScore(Sample);
+ if (AnomalyScore == std::numeric_limits<CalculatedNumber>::quiet_NaN()) {
+ AnomalyBit = false;
+ continue;
+ }
+
+ if (AnomalyScore < (100 * Cfg.DimensionAnomalyScoreThreshold)) {
+ AnomalyBit = false;
+ return false;
+ }
+ }
+
+ AnomalyBit = true;
+ return true;
+}
+
+std::array<KMeans, 1> Dimension::getModels() {
+ std::unique_lock<std::mutex> Lock(Mutex);
+ return Models;
}
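
For orientation, a hypothetical caller-side sketch of how the reworked interface fits together, assuming a Dimension object D, the current TimePoint Now and a collected Value/Exists pair; the real netdata code drives training and prediction from separate paths (hence the mutex and try_lock above), so this single-function driver is only an approximation:

    // Hypothetical driver; Dimension, TimePoint and CalculatedNumber are the
    // types used in the diff above, everything else is illustrative.
    void processDimension(Dimension &D, const TimePoint &Now,
                          CalculatedNumber Value, bool Exists) {
        // Skip charts/dimensions that have been marked obsolete.
        if (!D.isActive())
            return;

        // Retrain periodically, unless the dimension has been constant so far.
        if (D.shouldTrain(Now))
            (void) D.trainModel();

        // predict() now returns the anomaly bit directly.
        bool AnomalyBit = D.predict(Value, Exists);
        (void) AnomalyBit; // e.g. aggregated into anomaly-rate charts
    }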