From ac1154f4606d65a1693cb728325b9a06cfbfe001 Mon Sep 17 00:00:00 2001
From: Jason Mowry
Date: Thu, 23 Jun 2022 06:21:51 -0400
Subject: [PATCH] fix: Reduce CPU usage on Scraper agent scraping large set of
 Azure targets (#2050)

* Revise Promitor scraper task scheduling to organize Cron jobs by schedule
  instead of resource parameters, then handle multiple resources and/or
  resource discovery groups per job. Optionally enforce a maximum degree of
  parallelism across all Cron jobs using a mutex shared across the jobs; each
  operation that requires network access to the cluster runs on the thread
  pool and counts as 1 against the degree of parallelism.

# Conflicts:
#	src/Promitor.Agents.Scraper/Promitor.Agents.Scraper.csproj
#	src/Promitor.Agents.Scraper/Scheduling/SchedulingExtensions.cs

* Begin addressing outstanding code scan issues.

* Additional modifications per PR feedback to incorporate more unit tests and
  match styling. Additionally removed duplicate constants declarations for
  consistency.

* Additional code analysis warnings remediated.

* Update src/Promitor.Core.Scraping/Configuration/Model/MetricDimension.cs

Co-authored-by: Tom Kerkhove

* Further PR feedback

* Updated changelog.

* Why is a markdown file limited to a maximum line length? (one smaller than
  that used by some IDEs for that matter)

* Someone is going to have to explain to me the intent behind linting a
  documentation file for coding style. This just seems counterproductive.

* Updating unit test organization per PR feedback

Co-authored-by: Tom Kerkhove
---
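For reviewers: the heart of this change is a shared throttle around every network-bound scraping task. A minimal sketch of the pattern, using illustrative names rather than the actual Promitor API (the real implementation is ScheduleLimitedConcurrencyAsyncTask/WorkWrapper in ResourcesScrapingJob.cs below):

```csharp
// Sketch only, not the actual Promitor code: limit how much scraping work runs
// at once by pairing SemaphoreSlim.WaitAsync with a Release in a finally block.
using System;
using System.Collections.Generic;
using System.Threading;
using System.Threading.Tasks;

public static class ThrottlingSketch
{
    public static async Task RunAllAsync(IEnumerable<Func<Task>> workItems,
        int maxDegreeOfParallelism, CancellationToken cancellationToken)
    {
        var mutex = new SemaphoreSlim(maxDegreeOfParallelism, maxDegreeOfParallelism);
        var tasks = new List<Task>();

        foreach (var work in workItems)
        {
            // Each queued operation counts as 1 against the degree of parallelism.
            await mutex.WaitAsync(cancellationToken);
            tasks.Add(Task.Run(async () =>
            {
                try { await work(); }
                finally { mutex.Release(); } // Free the slot even when the work faults.
            }, cancellationToken));
        }

        await Task.WhenAll(tasks);
    }
}
```

In the actual change the throttle is optional: when server.maxDegreeOfParallelism is zero or negative, AddScrapingMutex registers a null IScrapingMutex and tasks are queued without a limit.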
 changelog/content/experimental/unreleased.md  |   2 +
 src/Promitor.Agents.Core/AgentProgram.cs      |   2 +-
 .../Configuration/Defaults.cs                 |  10 +-
 .../Server/ServerConfiguration.cs             |   6 +
 .../AzureMonitorClientFactory.cs              |  10 +-
 .../Configuration/Defaults.cs                 |  26 +-
 src/Promitor.Agents.Scraper/Docs/Open-Api.xml | 226 ++++++++++++-
 .../IServiceCollectionExtensions.cs           |  33 +-
 .../Promitor.Agents.Scraper.csproj            |   1 +
 .../Scheduling/IScrapingMutex.cs              | 182 ++++++++++
 .../ResourceDiscoveryGroupScrapingJob.cs      | 133 --------
 .../Scheduling/ResourceScrapingJob.cs         |  71 ----
 .../Scheduling/ResourcesScrapingJob.cs        | 311 ++++++++++++++++++
 .../Scheduling/SchedulingExtensions.cs        | 174 +++++-----
 .../Scheduling/ScrapingMutex.cs               |  18 +
 src/Promitor.Agents.Scraper/Startup.cs        |   1 +
 .../Configuration/Model/MetricDimension.cs    |  32 +-
 .../Configuration/Model/Scraping.cs           |  32 +-
 .../AzureMonitorClient.cs                     |  14 +-
 .../RuntimeConfigurationUnitTest.cs           |  50 ++-
 .../Model/Metrics/MetricDefinitionTests.cs    |  44 +++
 ...gusScraperRuntimeConfigurationGenerator.cs |   1 +
 .../Config/RuntimeConfigurationGenerator.cs   |  19 +-
 23 files changed, 1036 insertions(+), 362 deletions(-)
 create mode 100644 src/Promitor.Agents.Scraper/Scheduling/IScrapingMutex.cs
 delete mode 100644 src/Promitor.Agents.Scraper/Scheduling/ResourceDiscoveryGroupScrapingJob.cs
 delete mode 100644 src/Promitor.Agents.Scraper/Scheduling/ResourceScrapingJob.cs
 create mode 100644 src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs
 create mode 100644 src/Promitor.Agents.Scraper/Scheduling/ScrapingMutex.cs

diff --git a/changelog/content/experimental/unreleased.md b/changelog/content/experimental/unreleased.md
index d82cd77ec..1ff2ddbb7 100644
--- a/changelog/content/experimental/unreleased.md
+++ b/changelog/content/experimental/unreleased.md
@@ -9,6 +9,8 @@ version:
 - {{% tag added %}} Provide scraper for Azure Database for MySQL Servers ([docs](https://docs.promitor.io/v2.x/scraping/providers/mysql/) |
   [#1880](https://github.com/tomkerkhove/promitor/issues/324))
 - {{% tag fixed %}} Honor flag not to include timestamps in system metrics for Prometheus ([#1915](https://github.com/tomkerkhove/promitor/pull/1915))
+- {{% tag fixed %}} Performance degradation caused by high CPU usage when Promitor-agent-scraper has to scrape a large
+  set of Azure targets ([#2050](https://github.com/tomkerkhove/promitor/pull/2050))

 #### Resource Discovery

diff --git a/src/Promitor.Agents.Core/AgentProgram.cs b/src/Promitor.Agents.Core/AgentProgram.cs
index 5c8cf52bb..c69c56aa0 100644
--- a/src/Promitor.Agents.Core/AgentProgram.cs
+++ b/src/Promitor.Agents.Core/AgentProgram.cs
@@ -46,7 +46,7 @@ protected static int DetermineHttpPort(ServerConfiguration serverConfiguration)
         {
             Guard.NotNull(serverConfiguration, nameof(serverConfiguration));

-            return serverConfiguration?.HttpPort ?? 80;
+            return serverConfiguration?.HttpPort ?? Configuration.Defaults.Server.HttpPort;
         }

         ///
diff --git a/src/Promitor.Agents.Core/Configuration/Defaults.cs b/src/Promitor.Agents.Core/Configuration/Defaults.cs
index 531710526..1e13e16c2 100644
--- a/src/Promitor.Agents.Core/Configuration/Defaults.cs
+++ b/src/Promitor.Agents.Core/Configuration/Defaults.cs
@@ -1,4 +1,5 @@
-using Microsoft.Extensions.Logging;
+using System;
+using Microsoft.Extensions.Logging;

 namespace Promitor.Agents.Core.Configuration
 {
@@ -7,6 +8,13 @@ public static class Defaults
         public static class Server
         {
             public static int HttpPort { get; } = 80;
+
+            ///
+            /// Default upper limit on the number of concurrent threads across all scheduled concurrent scraping jobs,
+            /// set to a reasonable load per CPU so as not to choke the system with processing overhead while attempting to
+            /// communicate with cluster hosts and awaiting multiple outstanding API calls.
+            ///
+            public static int MaxDegreeOfParallelism { get; } = Environment.ProcessorCount * 8;
         }

         public class Telemetry
diff --git a/src/Promitor.Agents.Core/Configuration/Server/ServerConfiguration.cs b/src/Promitor.Agents.Core/Configuration/Server/ServerConfiguration.cs
index 2ca4f19be..8187b3fd2 100644
--- a/src/Promitor.Agents.Core/Configuration/Server/ServerConfiguration.cs
+++ b/src/Promitor.Agents.Core/Configuration/Server/ServerConfiguration.cs
@@ -3,5 +3,11 @@
     public class ServerConfiguration
     {
         public int HttpPort { get; set; } = Defaults.Server.HttpPort;
+
+        ///
+        /// Upper limit on the number of concurrent threads across all scheduled scraping jobs,
+        /// where 0 or negative is interpreted as unlimited.
+ /// + public int MaxDegreeOfParallelism { get; set; } = Defaults.Server.MaxDegreeOfParallelism; } } diff --git a/src/Promitor.Agents.Scraper/AzureMonitorClientFactory.cs b/src/Promitor.Agents.Scraper/AzureMonitorClientFactory.cs index 04df7559d..2a157903d 100644 --- a/src/Promitor.Agents.Scraper/AzureMonitorClientFactory.cs +++ b/src/Promitor.Agents.Scraper/AzureMonitorClientFactory.cs @@ -24,27 +24,27 @@ public class AzureMonitorClientFactory /// Id of the Azure subscription /// Writer to send metrics to all configured sinks /// Metrics collector to write metrics to Prometheus - /// Memory cache to store items in + /// Memory cache to store items in /// Configuration of Promitor /// Options for Azure Monitor logging /// Factory to create loggers with - public AzureMonitorClient CreateIfNotExists(AzureEnvironment cloud, string tenantId, string subscriptionId, MetricSinkWriter metricSinkWriter, IAzureScrapingPrometheusMetricsCollector azureScrapingPrometheusMetricsCollector, IMemoryCache memoryCache, IConfiguration configuration, IOptions azureMonitorLoggingConfiguration, ILoggerFactory loggerFactory) + public AzureMonitorClient CreateIfNotExists(AzureEnvironment cloud, string tenantId, string subscriptionId, MetricSinkWriter metricSinkWriter, IAzureScrapingPrometheusMetricsCollector azureScrapingPrometheusMetricsCollector, IMemoryCache resourceMetricDefinitionMemoryCache, IConfiguration configuration, IOptions azureMonitorLoggingConfiguration, ILoggerFactory loggerFactory) { if (_azureMonitorClients.ContainsKey(subscriptionId)) { return _azureMonitorClients[subscriptionId]; } - var azureMonitorClient = CreateNewAzureMonitorClient(cloud, tenantId, subscriptionId, metricSinkWriter, azureScrapingPrometheusMetricsCollector, memoryCache, configuration, azureMonitorLoggingConfiguration, loggerFactory); + var azureMonitorClient = CreateNewAzureMonitorClient(cloud, tenantId, subscriptionId, metricSinkWriter, azureScrapingPrometheusMetricsCollector, resourceMetricDefinitionMemoryCache, configuration, azureMonitorLoggingConfiguration, loggerFactory); _azureMonitorClients.TryAdd(subscriptionId, azureMonitorClient); return azureMonitorClient; } - private static AzureMonitorClient CreateNewAzureMonitorClient(AzureEnvironment cloud, string tenantId, string subscriptionId, MetricSinkWriter metricSinkWriter, IAzureScrapingPrometheusMetricsCollector azureScrapingPrometheusMetricsCollector, IMemoryCache memoryCache, IConfiguration configuration, IOptions azureMonitorLoggingConfiguration, ILoggerFactory loggerFactory) + private static AzureMonitorClient CreateNewAzureMonitorClient(AzureEnvironment cloud, string tenantId, string subscriptionId, MetricSinkWriter metricSinkWriter, IAzureScrapingPrometheusMetricsCollector azureScrapingPrometheusMetricsCollector, IMemoryCache resourceMetricDefinitionMemoryCache, IConfiguration configuration, IOptions azureMonitorLoggingConfiguration, ILoggerFactory loggerFactory) { var azureCredentials = AzureAuthenticationFactory.GetConfiguredAzureAuthentication(configuration); - var azureMonitorClient = new AzureMonitorClient(cloud, tenantId, subscriptionId, azureCredentials, metricSinkWriter, azureScrapingPrometheusMetricsCollector, memoryCache, loggerFactory, azureMonitorLoggingConfiguration); + var azureMonitorClient = new AzureMonitorClient(cloud, tenantId, subscriptionId, azureCredentials, metricSinkWriter, azureScrapingPrometheusMetricsCollector, resourceMetricDefinitionMemoryCache, loggerFactory, azureMonitorLoggingConfiguration); return azureMonitorClient; } } diff 
--git a/src/Promitor.Agents.Scraper/Configuration/Defaults.cs b/src/Promitor.Agents.Scraper/Configuration/Defaults.cs index d4717f538..79c218662 100644 --- a/src/Promitor.Agents.Scraper/Configuration/Defaults.cs +++ b/src/Promitor.Agents.Scraper/Configuration/Defaults.cs @@ -1,14 +1,7 @@ -using Microsoft.Extensions.Logging; - -namespace Promitor.Agents.Scraper.Configuration +namespace Promitor.Agents.Scraper.Configuration { public static class Defaults { - public static class Server - { - public static int HttpPort { get; } = 80; - } - public static class Prometheus { public static bool EnableMetricTimestamps { get; set; } = false; @@ -20,22 +13,5 @@ public static class MetricsConfiguration { public static string AbsolutePath { get; } = "/config/metrics-declaration.yaml"; } - - public class Telemetry - { - public static LogLevel? DefaultVerbosity { get; set; } = LogLevel.Error; - - public class ContainerLogs - { - public static LogLevel? Verbosity { get; set; } = null; - public static bool IsEnabled { get; set; } = true; - } - - public class ApplicationInsights - { - public static LogLevel? Verbosity { get; set; } = null; - public static bool IsEnabled { get; set; } = false; - } - } } } diff --git a/src/Promitor.Agents.Scraper/Docs/Open-Api.xml b/src/Promitor.Agents.Scraper/Docs/Open-Api.xml index 62ba9d2bf..d77560756 100644 --- a/src/Promitor.Agents.Scraper/Docs/Open-Api.xml +++ b/src/Promitor.Agents.Scraper/Docs/Open-Api.xml @@ -13,7 +13,7 @@ Id of the Azure subscription Writer to send metrics to all configured sinks Metrics collector to write metrics to Prometheus - Memory cache to store items in + Memory cache to store items in Configuration of Promitor Options for Azure Monitor logging Factory to create loggers with @@ -64,6 +64,225 @@ Type of the object to be cloned Initial object to clone + + + Limits the number of threads for scraping tasks that can access the thread pool concurrently. + + + + + The current count of the . + + + + + Returns a that can be used to wait on the . + + + + + Blocks the current thread until it can enter the . + + + + + Blocks the current thread until it can enter the , + while observing a . + + + The token to observe. + + + + + Blocks the current thread until it can enter the , + using a to measure the time interval. + + + A that represents the number of milliseconds to wait, + or a that represents -1 milliseconds to wait indefinitely. + + + true if the current thread successfully entered the ; otherwise, false. + + + + + Blocks the current thread until it can enter the , + using a to measure the time interval, while observing a + . + + + A that represents the number of milliseconds to wait, + or a that represents -1 milliseconds to wait indefinitely. + + + The to observe. + + + true if the current thread successfully entered the ; otherwise, false. + + + + + Blocks the current thread until it can enter the , + using a 32-bit signed integer to measure the time interval. + + + The number of milliseconds to wait, or (-1) to wait indefinitely. + + + true if the current thread successfully entered the ; otherwise, false. + + + + + Blocks the current thread until it can enter the , + using a 32-bit signed integer to measure the time interval, + while observing a . + + + The number of milliseconds to wait, or (-1) to wait indefinitely. + + + The to observe. + + + true if the current thread successfully entered the ; otherwise, false. + + + + + Asynchronously waits to enter the . + + A task that will complete when the semaphore has been entered. 
+
+
+
+            Asynchronously waits to enter the , while observing a
+            .
+
+            A task that will complete when the semaphore has been entered.
+
+            The token to observe.
+
+
+
+            Asynchronously waits to enter the ,
+            using a 32-bit signed integer to measure the time interval.
+
+
+            The number of milliseconds to wait, or (-1) to wait indefinitely.
+
+
+            A task that will complete with a result of true if the current thread successfully entered
+            the , otherwise with a result of false.
+
+
+
+            Asynchronously waits to enter the ,
+            using a to measure the time interval, while observing a
+            .
+
+            A that represents the number of milliseconds
+            to wait, or a that represents -1 milliseconds to wait indefinitely.
+
+
+            A task that will complete with a result of true if the current thread successfully entered
+            the , otherwise with a result of false.
+
+
+
+            Asynchronously waits to enter the ,
+            using a to measure the time interval.
+
+            A that represents the number of milliseconds
+            to wait, or a that represents -1 milliseconds to wait indefinitely.
+
+
+            The token to observe.
+
+
+            A task that will complete with a result of true if the current thread successfully entered
+            the , otherwise with a result of false.
+
+
+
+            Asynchronously waits to enter the ,
+            using a 32-bit signed integer to measure the time interval,
+            while observing a .
+
+            The number of milliseconds to wait, or (-1) to wait indefinitely.
+
+            The to observe.
+
+            A task that will complete with a result of true if the current thread successfully entered
+            the , otherwise with a result of false.
+
+
+
+            Exits the once.
+
+            The previous count of the .
+
+
+            Exits the a specified number of times.
+
+            The number of times to exit the semaphore.
+            The previous count of the .
+
+
+
+            A metrics scraping job for one or more resources, either enumerated specifically or
+            identified via resource discovery groups. All metrics included are expected to have
+            the same scraping schedule.
+
+
+
+            Create a metrics scraping job for one or more resources, either enumerated specifically or
+            identified via resource discovery groups. All metrics included are expected to have
+            the same scraping schedule.
+
+            name of scheduled job
+            declaration of which metrics to collect from which resources
+            repository source for discovering resources via criteria
+            destination for metric reporting output
+            means of obtaining a metrics scraper for a particular type of resource
+            means of obtaining an Azure Monitor client
+            metrics collector to write metrics to Prometheus
+            cache of metric definitions by resource ID
+            semaphore used to limit concurrency of tasks if configured, or null for no limiting
+            Promitor configuration
+            options for Azure Monitor logging
+            means to obtain a logger
+            logger to use for scraping detail
+
+
+
+            Run some task work in the thread pool, but only allow a limited number of threads to go at a time
+            (unless max degree of parallelism wasn't configured, in which case mutex is null and no limit is imposed).
+
+
+
+
+
+
+            Initializes a new instance of the class, specifying
+            the number of scraping tasks that can be processed concurrently.
+
+            The number of scraping tasks that can be processed concurrently.
+

             Plots all configured metric information into an ASCII table
@@ -156,6 +375,11 @@
             Collections of services in application
             Configuration of the application
+
+
+            Adds a semaphore-based implementation of to the .
+ + Inject configuration diff --git a/src/Promitor.Agents.Scraper/Extensions/IServiceCollectionExtensions.cs b/src/Promitor.Agents.Scraper/Extensions/IServiceCollectionExtensions.cs index b3afef65e..1cb399f2d 100644 --- a/src/Promitor.Agents.Scraper/Extensions/IServiceCollectionExtensions.cs +++ b/src/Promitor.Agents.Scraper/Extensions/IServiceCollectionExtensions.cs @@ -1,4 +1,5 @@ -using GuardNet; +using System; +using GuardNet; using JustEat.StatsD; using Microsoft.Extensions.Configuration; using Microsoft.Extensions.DependencyInjection.Extensions; @@ -14,6 +15,7 @@ using Promitor.Agents.Scraper.Configuration; using Promitor.Agents.Scraper.Configuration.Sinks; using Promitor.Agents.Scraper.Discovery; +using Promitor.Agents.Scraper.Scheduling; using Promitor.Agents.Scraper.Usability; using Promitor.Agents.Scraper.Validation.Steps; using Promitor.Agents.Scraper.Validation.Steps.Sinks; @@ -209,6 +211,35 @@ private static void AddStatsdMetricSink(IServiceCollection services, StatsdSinkC }); } + /// + /// Adds a semaphore-based implementation of to the . + /// + public static IServiceCollection AddScrapingMutex(this IServiceCollection services, IConfiguration configuration) + { + if (services == null) + { + throw new ArgumentNullException(nameof(services)); + } + + if (configuration == null) + { + throw new ArgumentNullException(nameof(configuration)); + } + + var serverConfiguration = configuration.GetSection("server").Get(); + + services.TryAdd(ServiceDescriptor.Singleton(_ => ScrapingMutexBuilder(serverConfiguration))); + + return services; + } + + private static ScrapingMutex ScrapingMutexBuilder(ServerConfiguration serverConfiguration) + { + return serverConfiguration.MaxDegreeOfParallelism > 0 + ? new ScrapingMutex(serverConfiguration.MaxDegreeOfParallelism) + : null; + } + /// /// Inject configuration /// diff --git a/src/Promitor.Agents.Scraper/Promitor.Agents.Scraper.csproj b/src/Promitor.Agents.Scraper/Promitor.Agents.Scraper.csproj index 9e5e05b15..c0226dd2a 100644 --- a/src/Promitor.Agents.Scraper/Promitor.Agents.Scraper.csproj +++ b/src/Promitor.Agents.Scraper/Promitor.Agents.Scraper.csproj @@ -36,6 +36,7 @@ + diff --git a/src/Promitor.Agents.Scraper/Scheduling/IScrapingMutex.cs b/src/Promitor.Agents.Scraper/Scheduling/IScrapingMutex.cs new file mode 100644 index 000000000..2b040a135 --- /dev/null +++ b/src/Promitor.Agents.Scraper/Scheduling/IScrapingMutex.cs @@ -0,0 +1,182 @@ +using System; +using System.Threading; +using System.Threading.Tasks; + +namespace Promitor.Agents.Scraper.Scheduling; + +/// +/// Limits the number of threads for scraping tasks that can access the thread pool concurrently. +/// +public interface IScrapingMutex +{ + /// + /// The current count of the . + /// + int CurrentCount { get; } + + /// + /// Returns a that can be used to wait on the . + /// + WaitHandle AvailableWaitHandle { get; } + + /// + /// Blocks the current thread until it can enter the . + /// + void Wait(); + + /// + /// Blocks the current thread until it can enter the , + /// while observing a . + /// + /// + /// The token to observe. + /// + void Wait(CancellationToken cancellationToken); + + /// + /// Blocks the current thread until it can enter the , + /// using a to measure the time interval. + /// + /// + /// A that represents the number of milliseconds to wait, + /// or a that represents -1 milliseconds to wait indefinitely. + /// + /// + /// true if the current thread successfully entered the ; otherwise, false. 
+ /// + bool Wait(TimeSpan timeout); + + /// + /// Blocks the current thread until it can enter the , + /// using a to measure the time interval, while observing a + /// . + /// + /// + /// A that represents the number of milliseconds to wait, + /// or a that represents -1 milliseconds to wait indefinitely. + /// + /// + /// The to observe. + /// + /// + /// true if the current thread successfully entered the ; otherwise, false. + /// + bool Wait(TimeSpan timeout, CancellationToken cancellationToken); + + /// + /// Blocks the current thread until it can enter the , + /// using a 32-bit signed integer to measure the time interval. + /// + /// + /// The number of milliseconds to wait, or (-1) to wait indefinitely. + /// + /// + /// true if the current thread successfully entered the ; otherwise, false. + /// + bool Wait(int millisecondsTimeout); + + /// + /// Blocks the current thread until it can enter the , + /// using a 32-bit signed integer to measure the time interval, + /// while observing a . + /// + /// + /// The number of milliseconds to wait, or (-1) to wait indefinitely. + /// + /// + /// The to observe. + /// + /// + /// true if the current thread successfully entered the ; otherwise, false. + /// + bool Wait(int millisecondsTimeout, CancellationToken cancellationToken); + + /// + /// Asynchronously waits to enter the . + /// + /// A task that will complete when the semaphore has been entered. + Task WaitAsync(); + + /// + /// Asynchronously waits to enter the , while observing a + /// . + /// + /// A task that will complete when the semaphore has been entered. + /// + /// The token to observe. + /// + Task WaitAsync(CancellationToken cancellationToken); + + /// + /// Asynchronously waits to enter the , + /// using a 32-bit signed integer to measure the time interval. + /// + /// + /// The number of milliseconds to wait, or (-1) to wait indefinitely. + /// + /// + /// A task that will complete with a result of true if the current thread successfully entered + /// the , otherwise with a result of false. + /// + Task WaitAsync(int millisecondsTimeout); + + /// + /// Asynchronously waits to enter the , + /// using a to measure the time interval, while observing a + /// . + /// + /// + /// A that represents the number of milliseconds + /// to wait, or a that represents -1 milliseconds to wait indefinitely. + /// + /// + /// A task that will complete with a result of true if the current thread successfully entered + /// the , otherwise with a result of false. + /// + Task WaitAsync(TimeSpan timeout); + + /// + /// Asynchronously waits to enter the , + /// using a to measure the time interval. + /// + /// + /// A that represents the number of milliseconds + /// to wait, or a that represents -1 milliseconds to wait indefinitely. + /// + /// + /// The token to observe. + /// + /// + /// A task that will complete with a result of true if the current thread successfully entered + /// the , otherwise with a result of false. + /// + Task WaitAsync(TimeSpan timeout, CancellationToken cancellationToken); + + /// + /// Asynchronously waits to enter the , + /// using a 32-bit signed integer to measure the time interval, + /// while observing a . + /// + /// + /// The number of milliseconds to wait, or (-1) to wait indefinitely. + /// + /// The to observe. + /// + /// A task that will complete with a result of true if the current thread successfully entered + /// the , otherwise with a result of false. 
+ /// + Task WaitAsync(int millisecondsTimeout, CancellationToken cancellationToken); + + /// + /// Exits the once. + /// + /// The previous count of the . + int Release(); + + /// + /// Exits the a specified number of times. + /// + /// The number of times to exit the semaphore. + /// The previous count of the . + int Release(int releaseCount); +} \ No newline at end of file diff --git a/src/Promitor.Agents.Scraper/Scheduling/ResourceDiscoveryGroupScrapingJob.cs b/src/Promitor.Agents.Scraper/Scheduling/ResourceDiscoveryGroupScrapingJob.cs deleted file mode 100644 index 3f835141e..000000000 --- a/src/Promitor.Agents.Scraper/Scheduling/ResourceDiscoveryGroupScrapingJob.cs +++ /dev/null @@ -1,133 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Threading; -using System.Threading.Tasks; -using CronScheduler.Extensions.Scheduler; -using GuardNet; -using Microsoft.Extensions.Caching.Memory; -using Microsoft.Extensions.Configuration; -using Microsoft.Extensions.Logging; -using Microsoft.Extensions.Options; -using Promitor.Agents.Scraper.Discovery; -using Promitor.Core.Contracts; -using Promitor.Core.Metrics.Prometheus.Collectors.Interfaces; -using Promitor.Core.Metrics.Sinks; -using Promitor.Core.Scraping.Configuration.Model; -using Promitor.Core.Scraping.Configuration.Model.Metrics; -using Promitor.Core.Scraping.Factories; -using Promitor.Integrations.AzureMonitor; -using Promitor.Integrations.AzureMonitor.Configuration; - -namespace Promitor.Agents.Scraper.Scheduling -{ - public class ResourceDiscoveryGroupScrapingJob : MetricScrapingJob, IScheduledJob - { - private readonly ResourceDiscoveryRepository _resourceDiscoveryRepository; - private readonly MetricDefinition _metricDefinition; - private readonly AzureMetadata _azureMetadata; - private readonly MetricSinkWriter _metricSinkWriter; - private readonly IAzureScrapingPrometheusMetricsCollector _azureScrapingPrometheusMetricsCollector; - private readonly AzureMonitorClientFactory _azureMonitorClientFactory; - private readonly IMemoryCache _memoryCache; - private readonly IConfiguration _configuration; - private readonly IOptions _azureMonitorLoggingConfiguration; - private readonly ILoggerFactory _loggerFactory; - - private readonly MetricScraperFactory _metricScraperFactory; - - public ResourceDiscoveryGroupScrapingJob(string jobName, string resourceDiscoveryGroupName, AzureMetadata azureMetadata, MetricDefinition metricDefinition, ResourceDiscoveryRepository resourceDiscoveryRepository, - MetricSinkWriter metricSinkWriter, - MetricScraperFactory metricScraperFactory, - AzureMonitorClientFactory azureMonitorClientFactory, - IAzureScrapingPrometheusMetricsCollector azureScrapingPrometheusMetricsCollector, - IMemoryCache memoryCache, - IConfiguration configuration, - IOptions azureMonitorLoggingConfiguration, - ILoggerFactory loggerFactory, - ILogger logger) - : base(jobName, logger) - { - Guard.NotNullOrWhitespace(resourceDiscoveryGroupName, nameof(resourceDiscoveryGroupName)); - Guard.NotNull(resourceDiscoveryRepository, nameof(resourceDiscoveryRepository)); - Guard.NotNull(metricDefinition, nameof(metricDefinition)); - Guard.NotNull(azureMetadata, nameof(azureMetadata)); - Guard.NotNullOrWhitespace(jobName, nameof(jobName)); - Guard.NotNull(metricScraperFactory, nameof(metricScraperFactory)); - Guard.NotNull(azureMonitorClientFactory, nameof(azureMonitorClientFactory)); - Guard.NotNull(azureScrapingPrometheusMetricsCollector, nameof(azureScrapingPrometheusMetricsCollector)); - Guard.NotNull(memoryCache, 
nameof(memoryCache)); - Guard.NotNull(configuration, nameof(configuration)); - Guard.NotNull(azureMonitorLoggingConfiguration, nameof(azureMonitorLoggingConfiguration)); - Guard.NotNull(loggerFactory, nameof(loggerFactory)); - Guard.NotNull(metricSinkWriter, nameof(metricSinkWriter)); - - ResourceDiscoveryGroupName = resourceDiscoveryGroupName; - - _azureMetadata = azureMetadata; - _metricDefinition = metricDefinition; - _resourceDiscoveryRepository = resourceDiscoveryRepository; - _metricSinkWriter = metricSinkWriter; - - _azureScrapingPrometheusMetricsCollector = azureScrapingPrometheusMetricsCollector; - _azureMonitorClientFactory = azureMonitorClientFactory; - _memoryCache = memoryCache; - _configuration = configuration; - _azureMonitorLoggingConfiguration = azureMonitorLoggingConfiguration; - _loggerFactory = loggerFactory; - - _metricScraperFactory = metricScraperFactory; - } - - public string ResourceDiscoveryGroupName { get; } - - public async Task ExecuteAsync(CancellationToken cancellationToken) - { - Logger.LogInformation("Scraping resource collection {ResourceDiscoveryGroup} - {Timestamp}", ResourceDiscoveryGroupName, DateTimeOffset.UtcNow); - - try - { - var discoveredResources = await _resourceDiscoveryRepository.GetResourceDiscoveryGroupAsync(ResourceDiscoveryGroupName); - Logger.LogInformation("Discovered {ResourceCount} resources for resource collection {ResourceDiscoveryGroup}.", discoveredResources?.Count ?? 0, ResourceDiscoveryGroupName); - - if (discoveredResources == null) - { - Logger.LogWarning("Discovered no resources for resource collection {ResourceDiscoveryGroup}.", ResourceDiscoveryGroupName); - return; - } - - List scrapeTasks = new List(); - foreach (var discoveredResource in discoveredResources) - { - Logger.LogDebug($"Scraping discovered resource {discoveredResource}"); - - var azureMonitorClient = _azureMonitorClientFactory.CreateIfNotExists(_azureMetadata.Cloud, _azureMetadata.TenantId, discoveredResource.SubscriptionId, _metricSinkWriter, _azureScrapingPrometheusMetricsCollector, _memoryCache, _configuration, _azureMonitorLoggingConfiguration, _loggerFactory); - - // Scrape resource - var scrapeTask = ScrapeResourceAsync(discoveredResource, azureMonitorClient); - scrapeTasks.Add(scrapeTask); - } - - await Task.WhenAll(scrapeTasks); - } - catch (Exception exception) - { - Logger.LogCritical(exception, "Failed to scrape resource collection {ResourceDiscoveryGroup}: {Exception}", ResourceDiscoveryGroupName, exception.Message); - } - } - - private async Task ScrapeResourceAsync(IAzureResourceDefinition discoveredResource, AzureMonitorClient azureMonitorClient) - { - try - { - var scrapingDefinition = _metricDefinition.CreateScrapeDefinition(discoveredResource, _azureMetadata); - - var scraper = _metricScraperFactory.CreateScraper(scrapingDefinition.Resource.ResourceType, _metricSinkWriter, _azureScrapingPrometheusMetricsCollector, azureMonitorClient); - await scraper.ScrapeAsync(scrapingDefinition); - } - catch (Exception exception) - { - Logger.LogCritical(exception, "Failed to scrape resource collection {Resource}: {Exception}", discoveredResource.UniqueName, exception.Message); - } - } - } -} \ No newline at end of file diff --git a/src/Promitor.Agents.Scraper/Scheduling/ResourceScrapingJob.cs b/src/Promitor.Agents.Scraper/Scheduling/ResourceScrapingJob.cs deleted file mode 100644 index b7ab0e957..000000000 --- a/src/Promitor.Agents.Scraper/Scheduling/ResourceScrapingJob.cs +++ /dev/null @@ -1,71 +0,0 @@ -using System; -using System.Threading; -using 
System.Threading.Tasks; -using CronScheduler.Extensions.Scheduler; -using GuardNet; -using Microsoft.Extensions.Logging; -using Promitor.Core.Contracts; -using Promitor.Core.Metrics.Prometheus.Collectors.Interfaces; -using Promitor.Core.Metrics.Sinks; -using Promitor.Core.Scraping.Configuration.Model.Metrics; -using Promitor.Core.Scraping.Factories; -using Promitor.Integrations.AzureMonitor; - -namespace Promitor.Agents.Scraper.Scheduling -{ - public class ResourceScrapingJob : MetricScrapingJob, - IScheduledJob - { - private readonly ScrapeDefinition _metricScrapeDefinition; - private readonly IAzureScrapingPrometheusMetricsCollector _azureScrapingPrometheusMetricsCollector; - private readonly AzureMonitorClient _azureMonitorClient; - private readonly MetricSinkWriter _metricSinkWriter; - - private readonly MetricScraperFactory _metricScraperFactory; - - public ResourceScrapingJob(string jobName, - ScrapeDefinition metricScrapeDefinition, - MetricSinkWriter metricSinkWriter, - MetricScraperFactory metricScraperFactory, - AzureMonitorClient azureMonitorClient, - IAzureScrapingPrometheusMetricsCollector azureScrapingPrometheusMetricsCollector, - ILogger logger) - : base(jobName, logger) - { - Guard.NotNull(azureScrapingPrometheusMetricsCollector, nameof(azureScrapingPrometheusMetricsCollector)); - Guard.NotNull(metricScrapeDefinition, nameof(metricScrapeDefinition)); - Guard.NotNull(metricScraperFactory, nameof(metricScraperFactory)); - Guard.NotNull(azureMonitorClient, nameof(azureMonitorClient)); - Guard.NotNull(metricSinkWriter, nameof(metricSinkWriter)); - - _metricScrapeDefinition = metricScrapeDefinition; - _metricSinkWriter = metricSinkWriter; - - _azureScrapingPrometheusMetricsCollector = azureScrapingPrometheusMetricsCollector; - _metricScraperFactory = metricScraperFactory; - _azureMonitorClient = azureMonitorClient; - } - - public async Task ExecuteAsync(CancellationToken cancellationToken) - { - Logger.LogInformation("Scraping Azure Monitor - {Timestamp}", DateTimeOffset.UtcNow); - - try - { - await ScrapeMetric(_metricScrapeDefinition); - } - catch (Exception exception) - { - Logger.LogCritical(exception, "Failed to scrape: {Exception}", exception.Message); - } - } - - private async Task ScrapeMetric(ScrapeDefinition metricDefinitionDefinition) - { - Logger.LogInformation("Scraping {MetricName} for resource type {ResourceType}", metricDefinitionDefinition.PrometheusMetricDefinition.Name, metricDefinitionDefinition.Resource.ResourceType); - - var scraper = _metricScraperFactory.CreateScraper(metricDefinitionDefinition.Resource.ResourceType, _metricSinkWriter, _azureScrapingPrometheusMetricsCollector, _azureMonitorClient); - await scraper.ScrapeAsync(metricDefinitionDefinition); - } - } -} \ No newline at end of file diff --git a/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs b/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs new file mode 100644 index 000000000..7d2f36df3 --- /dev/null +++ b/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs @@ -0,0 +1,311 @@ +using System; +using System.Collections.Concurrent; +using System.Collections.Generic; +using System.Threading; +using System.Threading.Tasks; +using CronScheduler.Extensions.Scheduler; +using GuardNet; +using Microsoft.Extensions.Caching.Memory; +using Microsoft.Extensions.Configuration; +using Microsoft.Extensions.Logging; +using Microsoft.Extensions.Options; +using Promitor.Agents.Scraper.Discovery; +using Promitor.Core.Contracts; +using 
Promitor.Core.Metrics.Prometheus.Collectors.Interfaces;
+using Promitor.Core.Metrics.Sinks;
+using Promitor.Core.Scraping.Configuration.Model;
+using Promitor.Core.Scraping.Configuration.Model.Metrics;
+using Promitor.Core.Scraping.Factories;
+using Promitor.Integrations.AzureMonitor.Configuration;
+
+namespace Promitor.Agents.Scraper.Scheduling
+{
+    ///
+    /// A metrics scraping job for one or more resources, either enumerated specifically or
+    /// identified via resource discovery groups. All metrics included are expected to have
+    /// the same scraping schedule.
+    ///
+    public class ResourcesScrapingJob : MetricScrapingJob, IScheduledJob
+    {
+        private readonly MetricsDeclaration _metricsDeclaration;
+        private readonly ResourceDiscoveryRepository _resourceDiscoveryRepository;
+        private readonly MetricSinkWriter _metricSinkWriter;
+        private readonly MetricScraperFactory _metricScraperFactory;
+        private readonly IAzureScrapingPrometheusMetricsCollector _azureScrapingPrometheusMetricsCollector;
+        private readonly AzureMonitorClientFactory _azureMonitorClientFactory;
+        private readonly IMemoryCache _resourceMetricDefinitionMemoryCache;
+        private readonly IScrapingMutex _scrapingTaskMutex;
+        private readonly IConfiguration _configuration;
+        private readonly IOptions _azureMonitorLoggingConfiguration;
+        private readonly ILoggerFactory _loggerFactory;
+
+        ///
+        /// Create a metrics scraping job for one or more resources, either enumerated specifically or
+        /// identified via resource discovery groups. All metrics included are expected to have
+        /// the same scraping schedule.
+        ///
+        /// name of scheduled job
+        /// declaration of which metrics to collect from which resources
+        /// repository source for discovering resources via criteria
+        /// destination for metric reporting output
+        /// means of obtaining a metrics scraper for a particular type of resource
+        /// means of obtaining an Azure Monitor client
+        /// metrics collector to write metrics to Prometheus
+        /// cache of metric definitions by resource ID
+        /// semaphore used to limit concurrency of tasks if configured, or null for no limiting
+        /// Promitor configuration
+        /// options for Azure Monitor logging
+        /// means to obtain a logger
+        /// logger to use for scraping detail
+        public ResourcesScrapingJob(string jobName,
+            MetricsDeclaration metricsDeclaration,
+            ResourceDiscoveryRepository resourceDiscoveryRepository,
+            MetricSinkWriter metricSinkWriter,
+            MetricScraperFactory metricScraperFactory,
+            AzureMonitorClientFactory azureMonitorClientFactory,
+            IAzureScrapingPrometheusMetricsCollector azureScrapingPrometheusMetricsCollector,
+            IMemoryCache resourceMetricDefinitionMemoryCache,
+            IScrapingMutex scrapingTaskMutex,
+            IConfiguration configuration,
+            IOptions azureMonitorLoggingConfiguration,
+            ILoggerFactory loggerFactory,
+            ILogger logger)
+            : base(jobName, logger)
+        {
+            Guard.NotNullOrWhitespace(jobName, nameof(jobName));
+            Guard.NotNull(metricsDeclaration, nameof(metricsDeclaration));
+            Guard.NotNull(metricsDeclaration.AzureMetadata, $"{nameof(metricsDeclaration)}.{nameof(metricsDeclaration.AzureMetadata)}");
+            Guard.NotNull(metricsDeclaration.Metrics, $"{nameof(metricsDeclaration)}.{nameof(metricsDeclaration.Metrics)}");
+            Guard.NotNull(resourceDiscoveryRepository, nameof(resourceDiscoveryRepository));
+            Guard.NotNull(metricSinkWriter, nameof(metricSinkWriter));
+            Guard.NotNull(metricScraperFactory, nameof(metricScraperFactory));
+            Guard.NotNull(azureMonitorClientFactory, nameof(azureMonitorClientFactory));
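+            // Clarifying note (not part of the upstream change): scrapingTaskMutex is deliberately
+            // not guarded here - AddScrapingMutex registers a null mutex when no maximum degree of
+            // parallelism is configured, and ScheduleLimitedConcurrencyAsyncTask treats null as "no limit".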
Guard.NotNull(azureScrapingPrometheusMetricsCollector, nameof(azureScrapingPrometheusMetricsCollector)); + Guard.NotNull(resourceMetricDefinitionMemoryCache, nameof(resourceMetricDefinitionMemoryCache)); + Guard.NotNull(configuration, nameof(configuration)); + Guard.NotNull(azureMonitorLoggingConfiguration, nameof(azureMonitorLoggingConfiguration)); + Guard.NotNull(loggerFactory, nameof(loggerFactory)); + + // all metrics must have the same scraping schedule or it is not possible for them to share the same job + VerifyAllMetricsHaveSameScrapingSchedule(metricsDeclaration); + + _metricsDeclaration = metricsDeclaration; + _resourceDiscoveryRepository = resourceDiscoveryRepository; + _metricSinkWriter = metricSinkWriter; + _metricScraperFactory = metricScraperFactory; + _azureMonitorClientFactory = azureMonitorClientFactory; + _azureScrapingPrometheusMetricsCollector = azureScrapingPrometheusMetricsCollector; + _resourceMetricDefinitionMemoryCache = resourceMetricDefinitionMemoryCache; + _scrapingTaskMutex = scrapingTaskMutex; + _configuration = configuration; + _azureMonitorLoggingConfiguration = azureMonitorLoggingConfiguration; + _loggerFactory = loggerFactory; + } + + private static void VerifyAllMetricsHaveSameScrapingSchedule(MetricsDeclaration metricsDeclaration) + { + if (metricsDeclaration.Metrics.Count > 1) + { + var metrics = metricsDeclaration.Metrics; + var commonScraping = metrics[0].Scraping; + + for (var i = 1; i < metrics.Count; i++) + { + if (!Equals(commonScraping, metrics[i].Scraping)) + throw new ArgumentException( + $"The \"{metrics[i].Scraping?.Schedule}\" scraping schedule for {nameof(metricsDeclaration)}.{nameof(metricsDeclaration.Metrics)}[{i}] does not share the common scraping schedule \"{commonScraping?.Schedule}\"."); + } + } + } + + public async Task ExecuteAsync(CancellationToken cancellationToken) + { + Logger.LogDebug("Started scraping job {JobName}.", Name); + + try + { + var scrapeDefinitions = await GetAllScrapeDefinitions(cancellationToken); + + await ScrapeMetrics(scrapeDefinitions, cancellationToken); + } + catch (OperationCanceledException) + { + Logger.LogWarning("Cancelled scraping metrics for job {JobName}.", Name); + } + catch (Exception ex) + { + Logger.LogCritical(ex, "Failed to scrape metrics for job {JobName}.", Name); + } + finally + { + Logger.LogDebug("Ended scraping job {JobName}.", Name); + } + } + + private async Task>> GetAllScrapeDefinitions(CancellationToken cancellationToken) + { + var scrapeDefinitions = new ConcurrentBag>(); + var tasks = new List(); + + foreach (var metric in _metricsDeclaration.Metrics) + { + var metricName = metric?.PrometheusMetricDefinition?.Name ?? 
"Unknown"; + + try + { + cancellationToken.ThrowIfCancellationRequested(); + + if (metric == null) + { + throw new NullReferenceException("Metric within metrics declaration was null."); + } + + if (metric.ResourceDiscoveryGroups != null) + { + foreach (var resourceDiscoveryGroup in metric.ResourceDiscoveryGroups) + { + cancellationToken.ThrowIfCancellationRequested(); + + if (string.IsNullOrWhiteSpace(resourceDiscoveryGroup?.Name)) + { + Logger.LogError("Found resource discovery group missing a name for metric {MetricName}.", metricName); + } + else + { + await ScheduleLimitedConcurrencyAsyncTask(tasks, () => GetDiscoveryGroupScrapeDefinitions(resourceDiscoveryGroup.Name, metric, scrapeDefinitions), cancellationToken); + } + } + } + + if (metric.Resources != null) + { + foreach (var resourceDefinition in metric.Resources) + { + if (resourceDefinition == null) + { + Logger.LogError("Found null resource for metric {MetricName}.", metricName); + } + else + { + GetResourceScrapeDefinition(resourceDefinition, metric, scrapeDefinitions); + } + } + } + } + catch (OperationCanceledException) + { + throw; + } + catch (Exception ex) + { + Logger.LogError(ex, "Could not get scrape definitions for metric {MetricName}.", metricName); + } + } + + await Task.WhenAll(tasks); + return scrapeDefinitions; + } + + private async Task GetDiscoveryGroupScrapeDefinitions(string resourceDiscoveryGroupName, MetricDefinition metricDefinition, ConcurrentBag> scrapeDefinitions) + { + // this runs in a separate thread, must trap exceptions + try + { + Logger.LogInformation("Scraping resource collection {ResourceDiscoveryGroup}.", resourceDiscoveryGroupName); + + var discoveredResources = await _resourceDiscoveryRepository.GetResourceDiscoveryGroupAsync(resourceDiscoveryGroupName); + if (discoveredResources == null) + { + Logger.LogWarning("Discovered no resources for resource collection {ResourceDiscoveryGroup}.", resourceDiscoveryGroupName); + return; + } + Logger.LogInformation("Discovered {ResourceCount} resources for resource collection {ResourceDiscoveryGroup}.", discoveredResources.Count, resourceDiscoveryGroupName); + + foreach (var discoveredResource in discoveredResources) + { + Logger.LogDebug("Discovered resource {DiscoveredResource}.", discoveredResource); + var scrapeDefinition = metricDefinition.CreateScrapeDefinition(discoveredResource, _metricsDeclaration.AzureMetadata); + scrapeDefinitions.Add(scrapeDefinition); + } + } + catch (Exception ex) + { + Logger.LogError(ex, "Failed to discover resources for group {GroupName}.", resourceDiscoveryGroupName); + } + } + + private void GetResourceScrapeDefinition(IAzureResourceDefinition resourceDefinition, MetricDefinition metricDefinition, ConcurrentBag> scrapeDefinitions) + { + var scrapeDefinition = metricDefinition.CreateScrapeDefinition(resourceDefinition, _metricsDeclaration.AzureMetadata); + scrapeDefinitions.Add(scrapeDefinition); + } + + private async Task ScrapeMetrics(IEnumerable> scrapeDefinitions, CancellationToken cancellationToken) + { + var tasks = new List(); + + foreach (var scrapeDefinition in scrapeDefinitions) + { + cancellationToken.ThrowIfCancellationRequested(); + + var metricName = scrapeDefinition.PrometheusMetricDefinition.Name; + var resourceType = scrapeDefinition.Resource.ResourceType; + Logger.LogInformation("Scraping {MetricName} for resource type {ResourceType}.", metricName, resourceType); + + await ScheduleLimitedConcurrencyAsyncTask(tasks, () => ScrapeMetric(scrapeDefinition), cancellationToken); + } + + await 
Task.WhenAll(tasks); + } + + private async Task ScrapeMetric(ScrapeDefinition scrapeDefinition) + { + // this runs in a separate thread, must trap exceptions + try + { + var resourceSubscriptionId = !string.IsNullOrWhiteSpace(scrapeDefinition.Resource.SubscriptionId) + ? scrapeDefinition.Resource.SubscriptionId + : _metricsDeclaration.AzureMetadata.SubscriptionId; + var azureMonitorClient = _azureMonitorClientFactory.CreateIfNotExists(_metricsDeclaration.AzureMetadata.Cloud, _metricsDeclaration.AzureMetadata.TenantId, + resourceSubscriptionId, _metricSinkWriter, _azureScrapingPrometheusMetricsCollector, _resourceMetricDefinitionMemoryCache, _configuration, + _azureMonitorLoggingConfiguration, _loggerFactory); + var scraper = _metricScraperFactory.CreateScraper(scrapeDefinition.Resource.ResourceType, _metricSinkWriter, _azureScrapingPrometheusMetricsCollector, azureMonitorClient); + await scraper.ScrapeAsync(scrapeDefinition); + } + catch (Exception ex) + { + Logger.LogError(ex, "Failed to scrape metric {MetricName} for resource {ResourceName}.", + scrapeDefinition.PrometheusMetricDefinition.Name, scrapeDefinition.Resource.ResourceName); + } + } + + /// + /// Run some task work in the thread pool, but only allow a limited number of threads to go at a time + /// (unless max degree of parallelism wasn't configured, in which case mutex is null and no limit is imposed). + /// + private async Task ScheduleLimitedConcurrencyAsyncTask(ICollection tasks, Func asyncWork, CancellationToken cancellationToken) + { + if (_scrapingTaskMutex == null) + { + tasks.Add(Task.Run(asyncWork, cancellationToken)); + return; + } + + await _scrapingTaskMutex.WaitAsync(cancellationToken); + + tasks.Add(Task.Run(() => WorkWrapper(asyncWork), cancellationToken)); + } + + private async Task WorkWrapper(Func work) + { + try + { + await work(); + } + finally + { + _scrapingTaskMutex.Release(); + } + } + } +} \ No newline at end of file diff --git a/src/Promitor.Agents.Scraper/Scheduling/SchedulingExtensions.cs b/src/Promitor.Agents.Scraper/Scheduling/SchedulingExtensions.cs index 7da6ca6a5..cc81acc20 100644 --- a/src/Promitor.Agents.Scraper/Scheduling/SchedulingExtensions.cs +++ b/src/Promitor.Agents.Scraper/Scheduling/SchedulingExtensions.cs @@ -1,22 +1,18 @@ using System; -using System.Linq; -using System.Text; +using System.Collections.Generic; using Microsoft.Extensions.Caching.Memory; using Microsoft.Extensions.Configuration; using Microsoft.Extensions.Logging; -using Microsoft.Extensions.Logging.Abstractions; using Microsoft.Extensions.Options; using Promitor.Agents.Core.Observability; using Promitor.Agents.Scraper; using Promitor.Agents.Scraper.Discovery; +using Promitor.Agents.Scraper.Scheduling; using Promitor.Core.Scraping.Configuration.Providers.Interfaces; using Promitor.Core.Scraping.Factories; -using Promitor.Agents.Scraper.Scheduling; -using Promitor.Core.Contracts; using Promitor.Core.Metrics.Prometheus.Collectors.Interfaces; using Promitor.Core.Metrics.Sinks; using Promitor.Core.Scraping.Configuration.Model; -using Promitor.Core.Scraping.Configuration.Model.Metrics; using Promitor.Integrations.AzureMonitor.Configuration; // ReSharper disable once CheckNamespace @@ -33,58 +29,86 @@ public static IServiceCollection ScheduleMetricScraping(this IServiceCollection { var serviceProviderToCreateJobsWith = services.BuildServiceProvider(); var metricsProvider = serviceProviderToCreateJobsWith.GetRequiredService(); - var metrics = metricsProvider.Get(applyDefaults: true); + var allMetrics = 
metricsProvider.Get(applyDefaults: true); - var loggerFactory = serviceProviderToCreateJobsWith.GetService(); + // this is relied on as non-null down the stack + var loggerFactory = serviceProviderToCreateJobsWith.GetRequiredService(); + var metricSinkWriter = serviceProviderToCreateJobsWith.GetRequiredService(); var azureMonitorLoggingConfiguration = serviceProviderToCreateJobsWith.GetService>(); - var memoryCache = serviceProviderToCreateJobsWith.GetService(); + var resourceMetricDefinitionMemoryCache = serviceProviderToCreateJobsWith.GetService(); var configuration = serviceProviderToCreateJobsWith.GetService(); var runtimeMetricCollector = serviceProviderToCreateJobsWith.GetService(); var azureMonitorClientFactory = serviceProviderToCreateJobsWith.GetRequiredService(); - var startupLogger = loggerFactory != null ? loggerFactory.CreateLogger() : NullLogger.Instance; - foreach (var metric in metrics.Metrics) + var startupLogger = loggerFactory.CreateLogger(); + var scrapingTaskMutex = serviceProviderToCreateJobsWith.GetService(); + + var metricsGroupedByScrapingInterval = GroupMetricsByScrapingInterval(allMetrics); + + foreach (var metricsForScrapingInterval in metricsGroupedByScrapingInterval) { - if (metric.ResourceDiscoveryGroups?.Any() == true) - { - foreach (var resourceDiscoveryGroup in metric.ResourceDiscoveryGroups) - { - ScheduleResourceDiscoveryGroupScraping(resourceDiscoveryGroup, metrics.AzureMetadata, metric, azureMonitorClientFactory, metricSinkWriter, runtimeMetricCollector, configuration, azureMonitorLoggingConfiguration, loggerFactory, startupLogger, services); - } - } + ScheduleResourcesScraping(metricsForScrapingInterval, metricSinkWriter, azureMonitorClientFactory, runtimeMetricCollector, resourceMetricDefinitionMemoryCache, + scrapingTaskMutex, configuration, azureMonitorLoggingConfiguration, loggerFactory, startupLogger, services); + } - if (metric.Resources != null) + return services; + } + + private static ICollection GroupMetricsByScrapingInterval(MetricsDeclaration allMetrics) + { + var metricsGroupings = new Dictionary(); + + foreach (var metric in allMetrics.Metrics) + { + if (!metricsGroupings.TryGetValue(metric.Scraping, out var metricsGroup)) { - foreach (var resource in metric.Resources) + metricsGroup = new MetricsDeclaration { - ScheduleResourceScraping(resource, metrics.AzureMetadata, metric, azureMonitorClientFactory, metricSinkWriter, runtimeMetricCollector, memoryCache, configuration, azureMonitorLoggingConfiguration, loggerFactory, startupLogger, services); - } + AzureMetadata = allMetrics.AzureMetadata, + MetricDefaults = allMetrics.MetricDefaults + }; + metricsGroupings.Add(metric.Scraping, metricsGroup); } + + metricsGroup.Metrics.Add(metric); } - return services; + return metricsGroupings.Values; } - private static void ScheduleResourceScraping(IAzureResourceDefinition resource, AzureMetadata azureMetadata, MetricDefinition metric, AzureMonitorClientFactory azureMonitorClientFactory, MetricSinkWriter metricSinkWriter, IAzureScrapingPrometheusMetricsCollector azureScrapingPrometheusMetricCollector, IMemoryCache memoryCache, IConfiguration configuration, IOptions azureMonitorLoggingConfiguration, ILoggerFactory loggerFactory, ILogger logger, IServiceCollection services) + private static void ScheduleResourcesScraping(MetricsDeclaration metricsDeclaration, + MetricSinkWriter metricSinkWriter, + AzureMonitorClientFactory azureMonitorClientFactory, + IAzureScrapingPrometheusMetricsCollector azureScrapingPrometheusMetricsCollector, + IMemoryCache 
resourceMetricDefinitionMemoryCache, + IScrapingMutex scrapingTaskMutex, + IConfiguration configuration, + IOptions azureMonitorLoggingConfiguration, + ILoggerFactory loggerFactory, + ILogger logger, + IServiceCollection services) { - var resourceSubscriptionId = string.IsNullOrWhiteSpace(resource.SubscriptionId) ? azureMetadata.SubscriptionId : resource.SubscriptionId; - var azureMonitorClient = azureMonitorClientFactory.CreateIfNotExists(azureMetadata.Cloud, azureMetadata.TenantId, resourceSubscriptionId, metricSinkWriter, azureScrapingPrometheusMetricCollector, memoryCache, configuration, azureMonitorLoggingConfiguration, loggerFactory); - var scrapeDefinition = metric.CreateScrapeDefinition(resource, azureMetadata); - var jobName = GenerateResourceScrapingJobName(scrapeDefinition, resource); - + var jobName = GenerateResourceScrapingJobName(metricsDeclaration, logger); + services.AddScheduler(builder => { builder.AddJob(jobServices => - { - return new ResourceScrapingJob(jobName, scrapeDefinition, + new ResourcesScrapingJob(jobName, + metricsDeclaration, + jobServices.GetService(), metricSinkWriter, jobServices.GetService(), - azureMonitorClient, - jobServices.GetService(), - jobServices.GetService>()); - }, schedulerOptions => + azureMonitorClientFactory, + azureScrapingPrometheusMetricsCollector, + resourceMetricDefinitionMemoryCache, + scrapingTaskMutex, + configuration, + azureMonitorLoggingConfiguration, + loggerFactory, + jobServices.GetService>()), + schedulerOptions => { - schedulerOptions.CronSchedule = scrapeDefinition.Scraping.Schedule; + schedulerOptions.CronSchedule = metricsDeclaration.Metrics[0].Scraping.Schedule; schedulerOptions.RunImmediately = true; }, jobName: jobName); @@ -98,74 +122,28 @@ private static void ScheduleResourceScraping(IAzureResourceDefinition resource, }; }); }); - - logger.LogInformation("Scheduled scraping job {JobName} for resource {Resource} which will be reported as metric {MetricName}", jobName, scrapeDefinition.Resource.UniqueName, scrapeDefinition.PrometheusMetricDefinition?.Name); + logger.LogInformation("Scheduled scraping job {JobName}.", jobName); } - private static void ScheduleResourceDiscoveryGroupScraping(AzureResourceDiscoveryGroup resourceDiscoveryGroup, AzureMetadata azureMetadata, MetricDefinition metricDefinition, AzureMonitorClientFactory azureMonitorClientFactory, MetricSinkWriter metricSinkWriter, IAzureScrapingPrometheusMetricsCollector azureScrapingPrometheusMetricCollector, IConfiguration configuration, IOptions azureMonitorLoggingConfiguration, ILoggerFactory loggerFactory, ILogger logger, IServiceCollection services) + private static string GenerateResourceScrapingJobName(MetricsDeclaration metricsDeclaration, ILogger logger) { - var jobName = GenerateResourceDiscoveryGroupScrapingJobName(metricDefinition.PrometheusMetricDefinition?.Name, resourceDiscoveryGroup.Name); - services.AddScheduler(builder => + string scheduleDescription; + try { - builder.AddJob(jobServices => - { - return new ResourceDiscoveryGroupScrapingJob(jobName, resourceDiscoveryGroup.Name, azureMetadata, metricDefinition, - jobServices.GetService(), - metricSinkWriter, - jobServices.GetService(), - azureMonitorClientFactory, - azureScrapingPrometheusMetricCollector, - jobServices.GetService(), - configuration, - azureMonitorLoggingConfiguration, - loggerFactory, - jobServices.GetService>()); - }, schedulerOptions => - { - schedulerOptions.CronSchedule = metricDefinition.Scraping.Schedule; - schedulerOptions.RunImmediately = true; - }, - jobName: 
jobName);
-                builder.AddUnobservedTaskExceptionHandler(s =>
-                {
-                    return
-                        (_, exceptionEventArgs) =>
-                        {
-                            var exceptionLogger = s.GetService>();
-                            BackgroundJobMonitor.HandleException(jobName, exceptionEventArgs, exceptionLogger);
-                        };
-                });
-            });
+                scheduleDescription = CronExpressionDescriptor.ExpressionDescriptor.GetDescription(metricsDeclaration.Metrics[0].Scraping.Schedule);
+                scheduleDescription = scheduleDescription.Replace(", ", "_").Replace(",", "_").Replace(" ", "_");
+            }
+            catch (Exception ex)
+            {
+                var metricName = metricsDeclaration.Metrics[0].AzureMetricConfiguration.MetricName;
+                logger.LogError(ex, $"Failed to create job name for scraping resources for list of metrics which includes {metricName}.");
+                scheduleDescription = $"UnparsedScheduleForMetricsIncluding{metricName}";
+            }
-
-            logger.LogInformation("Scheduled scraping job {JobName} for resource collection {ResourceDiscoveryGroup} which will be reported as metric {MetricName}", jobName, resourceDiscoveryGroup.Name, metricDefinition.PrometheusMetricDefinition?.Name);
-        }
-
-        private static string GenerateResourceScrapingJobName(ScrapeDefinition scrapeDefinition, IAzureResourceDefinition resource)
-        {
-            var jobNameBuilder = new StringBuilder();
-            jobNameBuilder.Append(scrapeDefinition.SubscriptionId);
-            jobNameBuilder.Append("-");
-            jobNameBuilder.Append(scrapeDefinition.ResourceGroupName);
-            jobNameBuilder.Append("-");
-            jobNameBuilder.Append(scrapeDefinition.PrometheusMetricDefinition.Name);
-            jobNameBuilder.Append("-");
-            jobNameBuilder.Append(resource.UniqueName);
-            jobNameBuilder.Append("-");
-            jobNameBuilder.Append(Guid.NewGuid().ToString());
-
-            return jobNameBuilder.ToString();
-        }
-        private static string GenerateResourceDiscoveryGroupScrapingJobName(string metricName, string resourceDiscoveryGroupName)
-        {
-            var jobNameBuilder = new StringBuilder();
-            jobNameBuilder.Append(metricName);
-            jobNameBuilder.Append("-");
-            jobNameBuilder.Append(resourceDiscoveryGroupName);
-            jobNameBuilder.Append("-");
-            jobNameBuilder.Append(Guid.NewGuid().ToString());
+            var metricsCount = metricsDeclaration.Metrics.Count;
+            var uniqueness = Guid.NewGuid().ToString("N");
-            return jobNameBuilder.ToString();
+            return $"Scraper-{metricsCount}Metrics-{scheduleDescription}-{uniqueness}";
         }
     }
 }
\ No newline at end of file
diff --git a/src/Promitor.Agents.Scraper/Scheduling/ScrapingMutex.cs b/src/Promitor.Agents.Scraper/Scheduling/ScrapingMutex.cs
new file mode 100644
index 000000000..a0f02cf04
--- /dev/null
+++ b/src/Promitor.Agents.Scraper/Scheduling/ScrapingMutex.cs
@@ -0,0 +1,18 @@
+using System.Threading;
+
+namespace Promitor.Agents.Scraper.Scheduling
+{
+    ///
+    public class ScrapingMutex : SemaphoreSlim, IScrapingMutex
+    {
+        ///
+        /// Initializes a new instance of the class, specifying
+        /// the number of scraping tasks that can be processed concurrently.
+        ///
+        /// The number of scraping tasks that can be processed concurrently.
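+        // Clarifying note (not part of the upstream change): passing the same value for
+        // SemaphoreSlim's initialCount and maxCount means all permits start available and
+        // Release can never push the count above the configured degree of parallelism
+        // (SemaphoreSlim throws SemaphoreFullException beyond maxCount).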
diff --git a/src/Promitor.Agents.Scraper/Startup.cs b/src/Promitor.Agents.Scraper/Startup.cs
index 3f3fedb5c..c8767b7b7 100644
--- a/src/Promitor.Agents.Scraper/Startup.cs
+++ b/src/Promitor.Agents.Scraper/Startup.cs
@@ -53,6 +53,7 @@ public void ConfigureServices(IServiceCollection services)
                 .AddResourceDiscoveryHealthCheck(Configuration);
 
             services.UseMetricSinks(Configuration)
+                .AddScrapingMutex(Configuration)
                 .ScheduleMetricScraping();
         }
 
diff --git a/src/Promitor.Core.Scraping/Configuration/Model/MetricDimension.cs b/src/Promitor.Core.Scraping/Configuration/Model/MetricDimension.cs
index faf023224..bff307989 100644
--- a/src/Promitor.Core.Scraping/Configuration/Model/MetricDimension.cs
+++ b/src/Promitor.Core.Scraping/Configuration/Model/MetricDimension.cs
@@ -1,13 +1,41 @@
-namespace Promitor.Core.Scraping.Configuration.Model
+using System;
+
+namespace Promitor.Core.Scraping.Configuration.Model
 {
     /// 
     /// Information about the dimension of an Azure Monitor metric
     /// 
-    public class MetricDimension
+    public class MetricDimension : IEquatable<MetricDimension>
     {
         /// 
         /// Name of the dimension
         /// 
         public string Name { get; set; }
+
+        public bool Equals(MetricDimension other)
+        {
+            if (ReferenceEquals(null, other))
+            {
+                return false;
+            }
+
+            if (ReferenceEquals(this, other))
+            {
+                return true;
+            }
+
+            return string.Equals(other.Name, Name, StringComparison.OrdinalIgnoreCase);
+        }
+
+        public override bool Equals(object obj)
+        {
+            return Equals(obj as MetricDimension);
+        }
+
+        public override int GetHashCode()
+        {
+            // ReSharper disable once NonReadonlyMemberInGetHashCode - this is providing the means to compare MetricDimension instances without having consumers become dependent on class members
+            return Name?.ToLowerInvariant().GetHashCode() ?? 0;
+        }
     }
 }
\ No newline at end of file
diff --git a/src/Promitor.Core.Scraping/Configuration/Model/Scraping.cs b/src/Promitor.Core.Scraping/Configuration/Model/Scraping.cs
index 2f29b1b5b..f4b1dff7d 100644
--- a/src/Promitor.Core.Scraping/Configuration/Model/Scraping.cs
+++ b/src/Promitor.Core.Scraping/Configuration/Model/Scraping.cs
@@ -1,7 +1,35 @@
-namespace Promitor.Core.Scraping.Configuration.Model
+using System;
+
+namespace Promitor.Core.Scraping.Configuration.Model
 {
-    public class Scraping
+    public class Scraping : IEquatable<Scraping>
     {
         public string Schedule { get; set; }
+
+        public bool Equals(Scraping other)
+        {
+            if (ReferenceEquals(null, other))
+            {
+                return false;
+            }
+
+            if (ReferenceEquals(this, other))
+            {
+                return true;
+            }
+
+            return Schedule?.Trim() == other.Schedule?.Trim();
+        }
+
+        public override bool Equals(object obj)
+        {
+            return Equals(obj as Scraping);
+        }
+
+        public override int GetHashCode()
+        {
+            // ReSharper disable once NonReadonlyMemberInGetHashCode - this is providing the means to compare Scraping instances without having consumers become dependent on class members
+            return Schedule?.Trim().GetHashCode() ?? 0;
+        }
     }
 }
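A short illustration of the value semantics introduced by these two hunks (hypothetical usage, not code from the patch): Scraping compares schedules after trimming whitespace, and MetricDimension compares names case-insensitively, with hash codes that agree with Equals, so both types behave predictably as grouping or lookup keys:

    using System;
    using Promitor.Core.Scraping.Configuration.Model;

    public static class ValueEqualityDemo
    {
        public static void Main()
        {
            var scraping1 = new Scraping { Schedule = "5 * * * *" };
            var scraping2 = new Scraping { Schedule = " 5 * * * * " };
            Console.WriteLine(scraping1.Equals(scraping2)); // True: schedules are trimmed before comparison

            var dimension1 = new MetricDimension { Name = "EntityName" };
            var dimension2 = new MetricDimension { Name = "entityname" };
            Console.WriteLine(dimension1.Equals(dimension2)); // True: names compare case-insensitively
            Console.WriteLine(dimension1.GetHashCode() == dimension2.GetHashCode()); // True: hash codes agree with Equals
        }
    }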
diff --git a/src/Promitor.Integrations.AzureMonitor/AzureMonitorClient.cs b/src/Promitor.Integrations.AzureMonitor/AzureMonitorClient.cs
index 0607154d3..dfde9a6e8 100644
--- a/src/Promitor.Integrations.AzureMonitor/AzureMonitorClient.cs
+++ b/src/Promitor.Integrations.AzureMonitor/AzureMonitorClient.cs
@@ -30,7 +30,7 @@ public class AzureMonitorClient
         private readonly TimeSpan _metricDefinitionCacheDuration = TimeSpan.FromHours(1);
         private readonly IAzure _authenticatedAzureSubscription;
         private readonly AzureCredentialsFactory _azureCredentialsFactory = new AzureCredentialsFactory();
-        private readonly IMemoryCache _memoryCache;
+        private readonly IMemoryCache _resourceMetricDefinitionMemoryCache;
         private readonly ILogger _logger;
 
         /// 
@@ -43,17 +43,17 @@ public class AzureMonitorClient
         /// Options for Azure Monitor logging
         /// Writer to send metrics to all configured sinks
         /// Metrics collector to write metrics to Prometheus
-        /// Memory cache to store items in for performance optimizations
+        /// Memory cache to store items in for performance optimizations
         /// Factory to create loggers with
-        public AzureMonitorClient(AzureEnvironment azureCloud, string tenantId, string subscriptionId, AzureAuthenticationInfo azureAuthenticationInfo, MetricSinkWriter metricSinkWriter, IAzureScrapingPrometheusMetricsCollector azureScrapingPrometheusMetricsCollector, IMemoryCache memoryCache, ILoggerFactory loggerFactory, IOptions azureMonitorLoggingConfiguration)
+        public AzureMonitorClient(AzureEnvironment azureCloud, string tenantId, string subscriptionId, AzureAuthenticationInfo azureAuthenticationInfo, MetricSinkWriter metricSinkWriter, IAzureScrapingPrometheusMetricsCollector azureScrapingPrometheusMetricsCollector, IMemoryCache resourceMetricDefinitionMemoryCache, ILoggerFactory loggerFactory, IOptions azureMonitorLoggingConfiguration)
         {
             Guard.NotNullOrWhitespace(tenantId, nameof(tenantId));
             Guard.NotNullOrWhitespace(subscriptionId, nameof(subscriptionId));
             Guard.NotNull(azureAuthenticationInfo, nameof(azureAuthenticationInfo));
             Guard.NotNull(azureMonitorLoggingConfiguration, nameof(azureMonitorLoggingConfiguration));
-            Guard.NotNull(memoryCache, nameof(memoryCache));
+            Guard.NotNull(resourceMetricDefinitionMemoryCache, nameof(resourceMetricDefinitionMemoryCache));
 
-            _memoryCache = memoryCache;
+            _resourceMetricDefinitionMemoryCache = resourceMetricDefinitionMemoryCache;
             _logger = loggerFactory.CreateLogger();
             _authenticatedAzureSubscription = CreateAzureClient(azureCloud, tenantId, subscriptionId, azureAuthenticationInfo, loggerFactory, metricSinkWriter, azureScrapingPrometheusMetricsCollector, azureMonitorLoggingConfiguration);
         }
@@ -115,14 +115,14 @@ public async Task> QueryMetricAsync(string metricName, stri
         private async Task> GetMetricDefinitionsAsync(string resourceId)
         {
             // Get cached metric definitions
-            if (_memoryCache.TryGetValue(resourceId, out IReadOnlyList metricDefinitions))
+            if (_resourceMetricDefinitionMemoryCache.TryGetValue(resourceId, out IReadOnlyList metricDefinitions))
             {
                 return metricDefinitions;
             }
 
             // Get from API and cache it
             var foundMetricDefinitions = await _authenticatedAzureSubscription.MetricDefinitions.ListByResourceAsync(resourceId);
-            _memoryCache.Set(resourceId, foundMetricDefinitions, _metricDefinitionCacheDuration);
+            _resourceMetricDefinitionMemoryCache.Set(resourceId, foundMetricDefinitions, _metricDefinitionCacheDuration);
 
             return foundMetricDefinitions;
         }
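The rename above makes the cache's purpose explicit now that more than one memory cache is in play. Both usages follow the same cache-aside pattern: return the cached entry when present, otherwise fetch, cache with a time-to-live, and return. A generic sketch of that pattern, as a hypothetical helper that is not part of this patch:

    using System;
    using System.Threading.Tasks;
    using Microsoft.Extensions.Caching.Memory;

    public static class CacheAside
    {
        // Return the cached value for the key when present; otherwise fetch it,
        // cache it with a relative time-to-live and return it.
        public static async Task<TItem> GetOrFetchAsync<TItem>(this IMemoryCache cache, string key, Func<Task<TItem>> fetch, TimeSpan timeToLive)
        {
            if (cache.TryGetValue(key, out TItem cachedItem))
            {
                return cachedItem;
            }

            var fetchedItem = await fetch();
            cache.Set(key, fetchedItem, timeToLive);
            return fetchedItem;
        }
    }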
diff --git a/src/Promitor.Tests.Unit/Configuration/RuntimeConfigurationUnitTest.cs b/src/Promitor.Tests.Unit/Configuration/RuntimeConfigurationUnitTest.cs
index dd168bef6..d30edbad2 100644
--- a/src/Promitor.Tests.Unit/Configuration/RuntimeConfigurationUnitTest.cs
+++ b/src/Promitor.Tests.Unit/Configuration/RuntimeConfigurationUnitTest.cs
@@ -5,7 +5,7 @@
 using Promitor.Agents.Scraper.Configuration;
 using Promitor.Tests.Unit.Generators.Config;
 using Xunit;
-using Defaults = Promitor.Agents.Scraper.Configuration.Defaults;
+using DefaultsCore = Promitor.Agents.Core.Configuration.Defaults;
 
 namespace Promitor.Tests.Unit.Configuration
 {
@@ -139,6 +139,23 @@ public async Task RuntimeConfiguration_HasConfiguredHttpPort_UsesConfigured()
             Assert.Equal(bogusHttpPort, runtimeConfiguration.Server.HttpPort);
         }
 
+        [Fact]
+        public async Task RuntimeConfiguration_HasConfiguredMaxDegreeOfParallelism_UsesConfigured()
+        {
+            // Arrange
+            var bogusMaxDegreeOfParallelism = BogusGenerator.Random.Int();
+            var configuration = await RuntimeConfigurationGenerator.WithServerConfiguration(maxDegreeOfParallelism: bogusMaxDegreeOfParallelism)
+                .GenerateAsync();
+
+            // Act
+            var runtimeConfiguration = configuration.Get();
+
+            // Assert
+            Assert.NotNull(runtimeConfiguration);
+            Assert.NotNull(runtimeConfiguration.Server);
+            Assert.Equal(bogusMaxDegreeOfParallelism, runtimeConfiguration.Server.MaxDegreeOfParallelism);
+        }
+
         [Theory]
         [InlineData(true)]
         [InlineData(false)]
@@ -270,8 +287,8 @@ public async Task RuntimeConfiguration_HasNoDefaultApplicationInsights_UsesDefau
             Assert.NotNull(runtimeConfiguration.Telemetry);
             Assert.NotNull(runtimeConfiguration.Telemetry.ApplicationInsights);
             Assert.Null(runtimeConfiguration.Telemetry.ApplicationInsights.InstrumentationKey);
-            Assert.Equal(Defaults.Telemetry.ApplicationInsights.Verbosity, runtimeConfiguration.Telemetry.ApplicationInsights.Verbosity);
-            Assert.Equal(Defaults.Telemetry.ApplicationInsights.IsEnabled, runtimeConfiguration.Telemetry.ApplicationInsights.IsEnabled);
+            Assert.Equal(DefaultsCore.Telemetry.ApplicationInsights.Verbosity, runtimeConfiguration.Telemetry.ApplicationInsights.Verbosity);
+            Assert.Equal(DefaultsCore.Telemetry.ApplicationInsights.IsEnabled, runtimeConfiguration.Telemetry.ApplicationInsights.IsEnabled);
         }
 
         [Fact]
@@ -289,8 +306,8 @@ public async Task RuntimeConfiguration_HasNoDefaultContainerLogsConfigured_UsesD
             Assert.NotNull(runtimeConfiguration);
             Assert.NotNull(runtimeConfiguration.Telemetry);
             Assert.NotNull(runtimeConfiguration.Telemetry.ContainerLogs);
-            Assert.Equal(Defaults.Telemetry.ContainerLogs.Verbosity, runtimeConfiguration.Telemetry.ContainerLogs.Verbosity);
-            Assert.Equal(Defaults.Telemetry.ContainerLogs.IsEnabled, runtimeConfiguration.Telemetry.ContainerLogs.IsEnabled);
+            Assert.Equal(DefaultsCore.Telemetry.ContainerLogs.Verbosity, runtimeConfiguration.Telemetry.ContainerLogs.Verbosity);
+            Assert.Equal(DefaultsCore.Telemetry.ContainerLogs.IsEnabled, runtimeConfiguration.Telemetry.ContainerLogs.IsEnabled);
         }
 
         [Fact]
@@ -307,14 +324,30 @@ public async Task RuntimeConfiguration_HasNoDefaultTelemetryVerbosityConfigured_
             // Assert
             Assert.NotNull(runtimeConfiguration);
             Assert.NotNull(runtimeConfiguration.Telemetry);
-            Assert.Equal(Defaults.Telemetry.DefaultVerbosity, runtimeConfiguration.Telemetry.DefaultVerbosity);
+            Assert.Equal(DefaultsCore.Telemetry.DefaultVerbosity, runtimeConfiguration.Telemetry.DefaultVerbosity);
         }
 
         [Fact]
         public async Task RuntimeConfiguration_HasNoHttpPort_UsesDefault()
         {
             // Arrange
-            var configuration = await RuntimeConfigurationGenerator.WithServerConfiguration(null)
+            var configuration = await RuntimeConfigurationGenerator.WithoutServerConfiguration()
+                .GenerateAsync();
+
+            // Act
+            var runtimeConfiguration = configuration.Get();
+
+            // Assert
+            Assert.NotNull(runtimeConfiguration);
+            Assert.NotNull(runtimeConfiguration.Server);
+            Assert.Equal(DefaultsCore.Server.HttpPort, runtimeConfiguration.Server.HttpPort);
+        }
+
+        [Fact]
+        public async Task RuntimeConfiguration_HasNoMaxDegreeOfParallelism_UsesDefault()
+        {
+            // Arrange
+            var configuration = await RuntimeConfigurationGenerator.WithoutServerConfiguration()
                 .GenerateAsync();
 
             // Act
@@ -323,7 +356,7 @@ public async Task RuntimeConfiguration_HasNoHttpPort_UsesDefault()
             // Assert
             Assert.NotNull(runtimeConfiguration);
             Assert.NotNull(runtimeConfiguration.Server);
-            Assert.Equal(Defaults.Server.HttpPort, runtimeConfiguration.Server.HttpPort);
+            Assert.Equal(DefaultsCore.Server.MaxDegreeOfParallelism, runtimeConfiguration.Server.MaxDegreeOfParallelism);
         }
 
         [Fact]
@@ -382,6 +415,7 @@ public async Task RuntimeConfiguration_IsFullyConfigured_UsesCorrectValues()
             Assert.NotNull(runtimeConfiguration.MetricsConfiguration);
             Assert.NotNull(runtimeConfiguration.ResourceDiscovery);
             Assert.Equal(bogusRuntimeConfiguration.Server.HttpPort, runtimeConfiguration.Server.HttpPort);
+            Assert.Equal(bogusRuntimeConfiguration.Server.MaxDegreeOfParallelism, runtimeConfiguration.Server.MaxDegreeOfParallelism);
             Assert.Equal(bogusRuntimeConfiguration.ResourceDiscovery.Host, runtimeConfiguration.ResourceDiscovery.Host);
             Assert.Equal(bogusRuntimeConfiguration.ResourceDiscovery.Port, runtimeConfiguration.ResourceDiscovery.Port);
             Assert.Equal(bogusRuntimeConfiguration.Telemetry.DefaultVerbosity, runtimeConfiguration.Telemetry.DefaultVerbosity);
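The tests above pin down both the configured and the default behavior of MaxDegreeOfParallelism. For context, a hypothetical sketch of what the AddScrapingMutex extension wired into Startup.cs earlier could look like; the configuration key and the convention of registering nothing when parallelism is unlimited are assumptions, since the actual implementation is outside this excerpt:

    using Microsoft.Extensions.Configuration;
    using Microsoft.Extensions.DependencyInjection;
    using Promitor.Agents.Scraper.Scheduling;

    public static class ScrapingMutexServiceCollectionExtensions
    {
        // Register a semaphore sized to the configured degree of parallelism;
        // zero or negative means unlimited, so no mutex is registered and
        // consumers resolving IScrapingMutex via GetService receive null.
        public static IServiceCollection AddScrapingMutex(this IServiceCollection services, IConfiguration configuration)
        {
            var maxDegreeOfParallelism = configuration.GetValue<int>("server:maxDegreeOfParallelism");

            if (maxDegreeOfParallelism > 0)
            {
                services.AddSingleton<IScrapingMutex>(new ScrapingMutex(maxDegreeOfParallelism));
            }

            return services;
        }
    }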
diff --git a/src/Promitor.Tests.Unit/Core/Scraping/Configuration/Model/Metrics/MetricDefinitionTests.cs b/src/Promitor.Tests.Unit/Core/Scraping/Configuration/Model/Metrics/MetricDefinitionTests.cs
index 040f0fc7c..caa3064bd 100644
--- a/src/Promitor.Tests.Unit/Core/Scraping/Configuration/Model/Metrics/MetricDefinitionTests.cs
+++ b/src/Promitor.Tests.Unit/Core/Scraping/Configuration/Model/Metrics/MetricDefinitionTests.cs
@@ -84,5 +84,49 @@ public void CreateScrapeDefinition_ResourceHasEmptyResourceGroupName_UsesGlobalN
             // Assert
             Assert.Equal(_azureMetadata.ResourceGroupName, scrapeDefinition.ResourceGroupName);
         }
+
+        [Fact]
+        public void CreateScrapeDefinition_MetricDimensionIsEquatable_UsesValueComparison()
+        {
+            // Arrange
+            var dimension1 = new MetricDimension { Name = "MyMetricDimension" };
+            var dimension2 = new MetricDimension { Name = dimension1.Name };
+
+            // Assert
+            Assert.Equal(dimension1, dimension2);
+        }
+
+        [Fact]
+        public void CreateScrapeDefinition_MetricDimensionIsDistinguishable_UsesValueComparison()
+        {
+            // Arrange
+            var dimension1 = new MetricDimension { Name = "MetricDimension1" };
+            var dimension2 = new MetricDimension { Name = "MetricDimension2" };
+
+            // Assert
+            Assert.NotEqual(dimension1, dimension2);
+        }
+
+        [Fact]
+        public void CreateScrapeDefinition_ScrapingIsEquatable_UsesValueComparison()
+        {
+            // Arrange
+            var scraping1 = new Promitor.Core.Scraping.Configuration.Model.Scraping { Schedule = "5 * * * *" };
+            var scraping2 = new Promitor.Core.Scraping.Configuration.Model.Scraping { Schedule = scraping1.Schedule };
+
+            // Assert
+            Assert.Equal(scraping1, scraping2);
+        }
+
+        [Fact]
+        public void CreateScrapeDefinition_ScrapingIsDistinguishable_UsesValueComparison()
+        {
+            // Arrange
+            var scraping1 = new Promitor.Core.Scraping.Configuration.Model.Scraping { Schedule = "5 * * * *" };
+            var scraping2 = new Promitor.Core.Scraping.Configuration.Model.Scraping { Schedule = "6 * * * *" };
+
+            // Assert
+            Assert.NotEqual(scraping1, scraping2);
+        }
     }
 }
 
diff --git a/src/Promitor.Tests.Unit/Generators/Config/BogusScraperRuntimeConfigurationGenerator.cs b/src/Promitor.Tests.Unit/Generators/Config/BogusScraperRuntimeConfigurationGenerator.cs
index 4778b7468..7df8e70c3 100644
--- a/src/Promitor.Tests.Unit/Generators/Config/BogusScraperRuntimeConfigurationGenerator.cs
+++ b/src/Promitor.Tests.Unit/Generators/Config/BogusScraperRuntimeConfigurationGenerator.cs
@@ -118,6 +118,7 @@ private static ServerConfiguration GenerateServerConfiguration()
             var serverConfiguration = new Faker()
                 .StrictMode(true)
                 .RuleFor(srvConfig => srvConfig.HttpPort, faker => faker.Random.Int())
+                .RuleFor(srvConfig => srvConfig.MaxDegreeOfParallelism, faker => faker.Random.Int())
                 .Generate();
             return serverConfiguration;
         }
diff --git a/src/Promitor.Tests.Unit/Generators/Config/RuntimeConfigurationGenerator.cs b/src/Promitor.Tests.Unit/Generators/Config/RuntimeConfigurationGenerator.cs
index b1afdc790..4ad44e93d 100644
--- a/src/Promitor.Tests.Unit/Generators/Config/RuntimeConfigurationGenerator.cs
+++ b/src/Promitor.Tests.Unit/Generators/Config/RuntimeConfigurationGenerator.cs
@@ -29,18 +29,22 @@ private RuntimeConfigurationGenerator(ScraperRuntimeConfiguration runtimeConfigu
             _runtimeConfiguration = runtimeConfiguration;
         }
 
-        public static RuntimeConfigurationGenerator WithServerConfiguration(int? httpPort = 888)
+        public static RuntimeConfigurationGenerator WithServerConfiguration(int httpPort = 888, int maxDegreeOfParallelism = 8)
         {
-            var serverConfiguration = httpPort == null
-                ? null
-                : new ServerConfiguration
-                {
-                    HttpPort = httpPort.Value
-                };
+            var serverConfiguration = new ServerConfiguration
+            {
+                HttpPort = httpPort,
+                MaxDegreeOfParallelism = maxDegreeOfParallelism
+            };
 
             return new RuntimeConfigurationGenerator(serverConfiguration);
         }
 
+        public static RuntimeConfigurationGenerator WithoutServerConfiguration()
+        {
+            return new RuntimeConfigurationGenerator(new ScraperRuntimeConfiguration());
+        }
+
         public static RuntimeConfigurationGenerator WithRuntimeConfiguration(ScraperRuntimeConfiguration runtimeConfiguration)
         {
             return new RuntimeConfigurationGenerator(runtimeConfiguration);
@@ -214,6 +218,7 @@ public async Task GenerateAsync()
         {
             configurationBuilder.AppendLine("server:");
             configurationBuilder.AppendLine($"  httpPort: {_runtimeConfiguration?.Server.HttpPort}");
+            configurationBuilder.AppendLine($"  maxDegreeOfParallelism: {_runtimeConfiguration?.Server.MaxDegreeOfParallelism}");
         }
 
         if (_runtimeConfiguration?.ResourceDiscovery != null)