Hook up partition compaction end to end implementation #6510
base: master
@@ -0,0 +1,60 @@
package compactor

import (
	"context"

	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/util/annotations"
)

type backgrounChunkSeriesSet struct {
	nextSet chan storage.ChunkSeries
	actual  storage.ChunkSeries
	cs      storage.ChunkSeriesSet
}

func (b *backgrounChunkSeriesSet) Next() bool {
	s, ok := <-b.nextSet
	b.actual = s
	return ok
}

func (b *backgrounChunkSeriesSet) At() storage.ChunkSeries {
	return b.actual
}

func (b *backgrounChunkSeriesSet) Err() error {
	return b.cs.Err()
}

func (b *backgrounChunkSeriesSet) Warnings() annotations.Annotations {
	return b.cs.Warnings()
}

func (b *backgrounChunkSeriesSet) run(ctx context.Context) {
	for {
		if !b.cs.Next() {
			close(b.nextSet)
			return
		}

		select {
		case b.nextSet <- b.cs.At():
		case <-ctx.Done():
			return
		}
	}
}

func NewBackgroundChunkSeriesSet(ctx context.Context, cs storage.ChunkSeriesSet) storage.ChunkSeriesSet {
	r := &backgrounChunkSeriesSet{
		cs:      cs,
		nextSet: make(chan storage.ChunkSeries, 1000),
	}

	go func() {
		r.run(ctx)
	}()

	return r
}
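The type above wraps an existing storage.ChunkSeriesSet and prefetches series into a buffered channel (capacity 1000) on a background goroutine, so the caller's Next()/At() loop is decoupled from the underlying reader. A minimal usage sketch, assuming it sits in the same compactor package; drainInBackground is a hypothetical helper for illustration, not part of this diff:

package compactor

import (
	"context"

	"github.com/prometheus/prometheus/storage"
)

// drainInBackground is a hypothetical caller: it consumes any ChunkSeriesSet
// through the background wrapper, so series are prefetched by the goroutine
// started in NewBackgroundChunkSeriesSet while this loop processes them.
func drainInBackground(ctx context.Context, cs storage.ChunkSeriesSet) error {
	set := NewBackgroundChunkSeriesSet(ctx, cs)
	for set.Next() {
		_ = set.At() // merge or write the series here
	}
	return set.Err()
}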
@@ -160,6 +160,30 @@ var (
		}
		return compactor, plannerFactory, nil
	}

	DefaultBlockDeletableCheckerFactory = func(_ context.Context, _ objstore.InstrumentedBucket, _ log.Logger) compact.BlockDeletableChecker {
		return compact.DefaultBlockDeletableChecker{}
	}

	PartitionCompactionBlockDeletableCheckerFactory = func(ctx context.Context, bkt objstore.InstrumentedBucket, logger log.Logger) compact.BlockDeletableChecker {
		return NewPartitionCompactionBlockDeletableChecker()
	}

	DefaultCompactionLifecycleCallbackFactory = func(_ context.Context, _ objstore.InstrumentedBucket, _ log.Logger, _ int, _ string, _ string, _ *compactorMetrics) compact.CompactionLifecycleCallback {
		return compact.DefaultCompactionLifecycleCallback{}
	}

	ShardedCompactionLifecycleCallbackFactory = func(ctx context.Context, userBucket objstore.InstrumentedBucket, logger log.Logger, metaSyncConcurrency int, compactDir string, userID string, compactorMetrics *compactorMetrics) compact.CompactionLifecycleCallback {
		return NewShardedCompactionLifecycleCallback(
			ctx,
			userBucket,
			logger,
			metaSyncConcurrency,
			compactDir,
			userID,
			compactorMetrics,
		)
	}
)

// BlocksGrouperFactory builds and returns the grouper to use to compact a tenant's blocks.
@@ -202,6 +226,22 @@ type PlannerFactory func(
	compactorMetrics *compactorMetrics,
) compact.Planner

type CompactionLifecycleCallbackFactory func(
	ctx context.Context,
	userBucket objstore.InstrumentedBucket,
	logger log.Logger,
	metaSyncConcurrency int,
	compactDir string,
	userID string,
	compactorMetrics *compactorMetrics,
) compact.CompactionLifecycleCallback

type BlockDeletableCheckerFactory func(
	ctx context.Context,
	bkt objstore.InstrumentedBucket,
	logger log.Logger,
) compact.BlockDeletableChecker

// Limits defines limits used by the Compactor.
type Limits interface {
	CompactorTenantShardSize(userID string) int
@@ -380,6 +420,10 @@ type Compactor struct {

	blocksPlannerFactory PlannerFactory

	blockDeletableCheckerFactory BlockDeletableCheckerFactory

	compactionLifecycleCallbackFactory CompactionLifecycleCallbackFactory

	// Client used to run operations on the bucket storing blocks.
	bucketClient objstore.InstrumentedBucket
@@ -436,11 +480,25 @@ func NewCompactor(compactorCfg Config, storageCfg cortex_tsdb.BlocksStorageConfig
		}
	}

	var blockDeletableCheckerFactory BlockDeletableCheckerFactory
	if compactorCfg.ShardingStrategy == util.ShardingStrategyShuffle && compactorCfg.CompactionStrategy == util.CompactionStrategyPartitioning {
		blockDeletableCheckerFactory = PartitionCompactionBlockDeletableCheckerFactory
	} else {
		blockDeletableCheckerFactory = DefaultBlockDeletableCheckerFactory
	}

	var compactionLifecycleCallbackFactory CompactionLifecycleCallbackFactory
	if compactorCfg.ShardingStrategy == util.ShardingStrategyShuffle && compactorCfg.CompactionStrategy == util.CompactionStrategyPartitioning {
		compactionLifecycleCallbackFactory = ShardedCompactionLifecycleCallbackFactory
	} else {
		compactionLifecycleCallbackFactory = DefaultCompactionLifecycleCallbackFactory
	}

	if ingestionReplicationFactor <= 0 {
		ingestionReplicationFactor = 1
	}

	cortexCompactor, err := newCompactor(compactorCfg, storageCfg, logger, registerer, bucketClientFactory, blocksGrouperFactory, blocksCompactorFactory, limits, ingestionReplicationFactor)
	cortexCompactor, err := newCompactor(compactorCfg, storageCfg, logger, registerer, bucketClientFactory, blocksGrouperFactory, blocksCompactorFactory, blockDeletableCheckerFactory, compactionLifecycleCallbackFactory, limits, ingestionReplicationFactor)
	if err != nil {
		return nil, errors.Wrap(err, "failed to create Cortex blocks compactor")
	}
@@ -456,6 +514,8 @@ func newCompactor(
	bucketClientFactory func(ctx context.Context) (objstore.InstrumentedBucket, error),
	blocksGrouperFactory BlocksGrouperFactory,
	blocksCompactorFactory BlocksCompactorFactory,
	blockDeletableCheckerFactory BlockDeletableCheckerFactory,
	compactionLifecycleCallbackFactory CompactionLifecycleCallbackFactory,
	limits *validation.Overrides,
	ingestionReplicationFactor int,
) (*Compactor, error) {

@@ -466,15 +526,17 @@ func newCompactor(
		compactorMetrics = newDefaultCompactorMetrics(registerer)
	}
	c := &Compactor{
		compactorCfg:           compactorCfg,
		storageCfg:             storageCfg,
		parentLogger:           logger,
		logger:                 log.With(logger, "component", "compactor"),
		registerer:             registerer,
		bucketClientFactory:    bucketClientFactory,
		blocksGrouperFactory:   blocksGrouperFactory,
		blocksCompactorFactory: blocksCompactorFactory,
		allowedTenants:         util.NewAllowedTenants(compactorCfg.EnabledTenants, compactorCfg.DisabledTenants),
		compactorCfg:                       compactorCfg,
		storageCfg:                         storageCfg,
		parentLogger:                       logger,
		logger:                             log.With(logger, "component", "compactor"),
		registerer:                         registerer,
		bucketClientFactory:                bucketClientFactory,
		blocksGrouperFactory:               blocksGrouperFactory,
		blocksCompactorFactory:             blocksCompactorFactory,
		blockDeletableCheckerFactory:       blockDeletableCheckerFactory,
		compactionLifecycleCallbackFactory: compactionLifecycleCallbackFactory,
		allowedTenants:                     util.NewAllowedTenants(compactorCfg.EnabledTenants, compactorCfg.DisabledTenants),

		CompactorStartDurationSeconds: promauto.With(registerer).NewGauge(prometheus.GaugeOpts{
			Name: "cortex_compactor_start_duration_seconds",
@@ -662,12 +724,6 @@ func (c *Compactor) starting(ctx context.Context) error {
	}, c.bucketClient, c.usersScanner, c.compactorCfg.CompactionVisitMarkerTimeout, c.limits, c.parentLogger, cleanerRingLifecyclerID, c.registerer, c.compactorCfg.CleanerVisitMarkerTimeout, c.compactorCfg.CleanerVisitMarkerFileUpdateInterval,
		c.compactorMetrics.syncerBlocksMarkedForDeletion, c.compactorMetrics.remainingPlannedCompactions)

	// Ensure an initial cleanup occurred before starting the compactor.
	if err := services.StartAndAwaitRunning(ctx, c.blocksCleaner); err != nil {
		c.ringSubservices.StopAsync()
		return errors.Wrap(err, "failed to start the blocks cleaner")
	}

	if c.compactorCfg.CachingBucketEnabled {
		matchers := cortex_tsdb.NewMatchers()
		// Do not cache tenant deletion marker and block deletion marker for compactor
@@ -698,15 +754,26 @@ func (c *Compactor) stopping(_ error) error {
}

func (c *Compactor) running(ctx context.Context) error {
	// Ensure an initial cleanup occurred as first thing when running compactor.
	if err := services.StartAndAwaitRunning(ctx, c.blocksCleaner); err != nil {
		c.ringSubservices.StopAsync()
		return errors.Wrap(err, "failed to start the blocks cleaner")
	}

	// Run an initial compaction before starting the interval.
	// Insert jitter right before compaction starts to avoid multiple starting compactor to be in sync
	time.Sleep(time.Duration(rand.Int63n(int64(float64(c.compactorCfg.CompactionInterval) * 0.1))))
	c.compactUsers(ctx)

	ticker := time.NewTicker(util.DurationWithJitter(c.compactorCfg.CompactionInterval, 0.05))
	ticker := time.NewTicker(c.compactorCfg.CompactionInterval)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			// Insert jitter right before compaction starts, so that there will always
			// have jitter even compaction time is longer than CompactionInterval
			time.Sleep(time.Duration(rand.Int63n(int64(float64(c.compactorCfg.CompactionInterval) * 0.1))))
			c.compactUsers(ctx)
		case <-ctx.Done():
			return nil
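For context on the jitter applied in the hunk above: the sleep draws a uniform random duration in [0, 10% of CompactionInterval). A standalone sketch of that arithmetic (the 30-minute interval is an assumed example value, not from this PR):

package main

import (
	"fmt"
	"math/rand"
	"time"
)

func main() {
	compactionInterval := 30 * time.Minute // assumed example value

	// Same expression as in running(): a uniform jitter in [0, 3m) for a 30m interval.
	jitter := time.Duration(rand.Int63n(int64(float64(compactionInterval) * 0.1)))
	fmt.Println("sleeping for", jitter, "before starting compaction")
}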
@@ -717,23 +784,19 @@ func (c *Compactor) running(ctx context.Context) error {
}

func (c *Compactor) compactUsers(ctx context.Context) {
	failed := false
	succeeded := false
	interrupted := false
	compactionErrorCount := 0

	c.CompactionRunsStarted.Inc()

	defer func() {
		// interruptions and successful runs are considered
		// mutually exclusive but we consider a run failed if any
		// tenant runs failed even if later runs are interrupted
		if !interrupted && !failed {
		if succeeded && compactionErrorCount == 0 {
			c.CompactionRunsCompleted.Inc()
			c.CompactionRunsLastSuccess.SetToCurrentTime()
		}
		if interrupted {
		} else if interrupted {
			c.CompactionRunsInterrupted.Inc()
		}
		if failed {
		} else {
			c.CompactionRunsFailed.Inc()
		}
@@ -747,7 +810,6 @@ func (c *Compactor) compactUsers(ctx context.Context) {
	level.Info(c.logger).Log("msg", "discovering users from bucket")
	users, err := c.discoverUsersWithRetries(ctx)
	if err != nil {
		failed = true
		level.Error(c.logger).Log("msg", "failed to discover users from bucket", "err", err)
		return
	}

@@ -816,7 +878,7 @@ func (c *Compactor) compactUsers(ctx context.Context) {
			}

			c.CompactionRunFailedTenants.Inc()
			failed = true
			compactionErrorCount++
			level.Error(c.logger).Log("msg", "failed to compact user blocks", "user", userID, "err", err)
			continue
		}

@@ -851,6 +913,7 @@ func (c *Compactor) compactUsers(ctx context.Context) {
			}
		}
	}
	succeeded = true
}

func (c *Compactor) compactUserWithRetries(ctx context.Context, userID string) error {
@@ -885,6 +948,11 @@ func (c *Compactor) compactUserWithRetries(ctx context.Context, userID string) error {
		retries.Wait()
	}

	err := errors.Unwrap(errors.Cause(lastErr))
	if errors.Is(err, plannerCompletedPartitionError) || errors.Is(err, plannerVisitedPartitionError) {
		return nil
	}

	return lastErr
}
@@ -898,7 +966,12 @@ func (c *Compactor) compactUser(ctx context.Context, userID string) error {

	// Filters out duplicate blocks that can be formed from two or more overlapping
	// blocks that fully submatches the source blocks of the older blocks.
	deduplicateBlocksFilter := block.NewDeduplicateFilter(c.compactorCfg.BlockSyncConcurrency)
	var deduplicateBlocksFilter CortexMetadataFilter
	if c.compactorCfg.ShardingStrategy == util.ShardingStrategyShuffle && c.compactorCfg.CompactionStrategy == util.CompactionStrategyPartitioning {
		deduplicateBlocksFilter = &DisabledDeduplicateFilter{}
	} else {
		deduplicateBlocksFilter = block.NewDeduplicateFilter(c.compactorCfg.BlockSyncConcurrency)
	}

	// While fetching blocks, we filter out blocks that were marked for deletion by using IgnoreDeletionMarkFilter.
	// No delay is used -- all blocks with deletion marker are ignored, and not considered for compaction.
@@ -966,12 +1039,14 @@ func (c *Compactor) compactUser(ctx context.Context, userID string) error {

	currentCtx, cancel := context.WithCancel(ctx)
	defer cancel()
	compactor, err := compact.NewBucketCompactor(
	compactor, err := compact.NewBucketCompactorWithCheckerAndCallback(
		ulogger,
		syncer,
		c.blocksGrouperFactory(currentCtx, c.compactorCfg, bucket, ulogger, c.BlocksMarkedForNoCompaction, c.blockVisitMarkerReadFailed, c.blockVisitMarkerWriteFailed, syncerMetrics, c.compactorMetrics, c.ring, c.ringLifecycler, c.limits, userID, noCompactMarkerFilter, c.ingestionReplicationFactor),
		c.blocksPlannerFactory(currentCtx, bucket, ulogger, c.compactorCfg, noCompactMarkerFilter, c.ringLifecycler, userID, c.blockVisitMarkerReadFailed, c.blockVisitMarkerWriteFailed, c.compactorMetrics),
		c.blocksCompactor,
		c.blockDeletableCheckerFactory(currentCtx, bucket, ulogger),
		c.compactionLifecycleCallbackFactory(currentCtx, bucket, ulogger, c.compactorCfg.MetaSyncConcurrency, c.compactDirForUser(userID), userID, c.compactorMetrics),
		c.compactDirForUser(userID),
		bucket,
		c.compactorCfg.CompactionConcurrency,

@@ -982,6 +1057,7 @@ func (c *Compactor) compactUser(ctx context.Context, userID string) error {
	}

	if err := compactor.Compact(ctx); err != nil {
		level.Warn(ulogger).Log("msg", "compaction failed with error", "err", err)
		return errors.Wrap(err, "compaction")
	}
@@ -1148,3 +1224,20 @@ func (c *Compactor) isPermissionDeniedErr(err error) bool {
	}
	return s.Code() == codes.PermissionDenied
}

type CortexMetadataFilter interface {
	block.DeduplicateFilter
	block.MetadataFilter
}

type DisabledDeduplicateFilter struct {
}

func (f *DisabledDeduplicateFilter) Filter(ctx context.Context, metas map[ulid.ULID]*metadata.Meta, synced block.GaugeVec, modified block.GaugeVec) error {
	// don't do any deduplicate filtering
	return nil
}

func (f *DisabledDeduplicateFilter) DuplicateIDs() []ulid.ULID {
	return nil
}
Can we make these types private?
Is there a specific reason why we have to move cleaning here?

Because the cleaner cycle can run for a long time, depending on how many tenants there are and how big each tenant is. We don't want the compactor to end up in an unhealthy state in the ring because of a long-running cleaner process.
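A minimal sketch of the pattern being discussed, assuming the dskit services model Cortex uses: slow work is deferred from the starting phase to the running phase, so StartAndAwaitRunning on the service returns quickly and the instance can register as healthy in the ring. Everything below is illustrative, not code from this PR.

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/grafana/dskit/services"
)

func main() {
	svc := services.NewBasicService(
		// starting: keep this fast so the service reports Running promptly.
		func(ctx context.Context) error { return nil },
		// running: do the potentially long initial cleanup here instead.
		func(ctx context.Context) error {
			fmt.Println("running: slow initial cleanup standing in for the blocks cleaner")
			time.Sleep(2 * time.Second)
			<-ctx.Done() // then wait for shutdown
			return nil
		},
		// stopping: nothing to clean up in this sketch.
		func(_ error) error { return nil },
	)

	ctx := context.Background()
	if err := services.StartAndAwaitRunning(ctx, svc); err != nil {
		panic(err)
	}
	fmt.Println("service is Running; the long cleanup did not block startup")
	_ = services.StopAndAwaitTerminated(ctx, svc)
}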