save cluster info in multi bsons
simagix committed Jul 17, 2022
1 parent 0c18dc3 commit ccb5c06
Showing 5 changed files with 57 additions and 29 deletions.
comparison.go (8 changes: 4 additions & 4 deletions)
@@ -118,8 +118,8 @@ func (p *Comparison) compare() error {
     var err error
     // build target stats map
     dbMap := map[string]mdb.Database{}
-    for i, db := range p.TargetStats.Databases {
-        dbMap[db.Name] = p.TargetStats.Databases[i]
+    for i, db := range *p.TargetStats.Databases {
+        dbMap[db.Name] = (*p.TargetStats.Databases)[i]
     }
     // compare a few key metrics
     codeDefault := mdb.CodeDefault
@@ -129,8 +129,8 @@ func (p *Comparison) compare() error {
     printer := message.NewPrinter(language.English)
     p.Logger.Info("=== Comparison Results (source vs. target) ===")
     p.Logger.Info(printer.Sprintf("Number of Databases: \t%12d%v\t%12d%v",
-        len(p.SourceStats.Databases), p.getColor(int64(len(p.SourceStats.Databases)), int64(len(p.TargetStats.Databases))), len(p.TargetStats.Databases), codeDefault))
-    for _, db := range p.SourceStats.Databases {
+        len(*p.SourceStats.Databases), p.getColor(int64(len(*p.SourceStats.Databases)), int64(len(*p.TargetStats.Databases))), len(*p.TargetStats.Databases), codeDefault))
+    for _, db := range *p.SourceStats.Databases {
         collMap := map[string]mdb.Collection{}
         for i, coll := range dbMap[db.Name].Collections {
             collMap[coll.NS] = dbMap[db.Name].Collections[i]
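Note on the dereferences above: ClusterStats.Databases changes from []Database to *[]Database in this commit (see mdb/cluster_stats.go below), so callers such as compare() must dereference the pointer before ranging or indexing, which is what *p.SourceStats.Databases and (*p.TargetStats.Databases)[i] do. A minimal, self-contained sketch of that Go rule, using a stand-in Database type rather than keyhole's own:

package main

import "fmt"

type Database struct{ Name string }

func main() {
    dbs := []Database{{Name: "admin"}, {Name: "config"}}
    p := &dbs // stands in for the new *[]Database field

    // A pointer to a slice cannot be ranged over or indexed directly;
    // it must be dereferenced first, hence *p and (*p)[i] in the diff.
    for i := range *p {
        fmt.Println(i, (*p)[i].Name)
    }
}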
mdb/cluster_stats.go (68 changes: 48 additions & 20 deletions)
@@ -3,6 +3,7 @@
 package mdb
 
 import (
+    "bytes"
     "context"
     "errors"
     "fmt"
@@ -33,7 +34,7 @@ type ClusterStats struct {
     BuildInfo BuildInfo `bson:"buildInfo"`
     CmdLineOpts CmdLineOpts `bson:"getCmdLineOpts"`
     Cluster string `bson:"cluster"`
-    Databases []Database `bson:"databases"`
+    Databases *[]Database `bson:"databases"`
     Host string `bson:"host"`
     HostInfo HostInfo `bson:"hostInfo"`
     Logger *gox.Logger `bson:"keyhole"`
@@ -121,9 +122,11 @@ func (p *ClusterStats) GetClusterStats(client *mongo.Client, connString connstri
     db.SetRedaction(p.redact)
     db.SetVerbose(p.verbose)
     db.SetFastMode(p.fastMode)
-    if p.Databases, err = db.GetAllDatabasesStats(client, p.dbNames); err != nil {
+    var databases []Database
+    if databases, err = db.GetAllDatabasesStats(client, p.dbNames); err != nil {
         p.Logger.Info(fmt.Sprintf(`GetAllDatabasesStats(): %v`, err))
     }
+    p.Databases = &databases
     return nil
 }
 
@@ -264,23 +267,6 @@ func (p *ClusterStats) OutputBSON() (string, []byte, error) {
         result := `roles 'clusterMonitor' and 'readAnyDatabase' are required`
         return ofile, data, errors.New(result)
     }
-    if data, err = bson.Marshal(p); err != nil {
-        maxCollections := 2022
-        left := maxCollections
-        gox.GetLogger("").Errorf("error marshaling bson: %v, retry and limit # of collections to %v", err, maxCollections)
-        for i, db := range p.Databases {
-            if len(db.Collections) < left {
-                left -= len(db.Collections)
-            } else {
-                p.Databases[i].Collections = db.Collections[:left]
-                p.Databases = p.Databases[:i+1]
-                break
-            }
-        }
-        if data, err = bson.Marshal(p); err != nil {
-            return ofile, data, err
-        }
-    }
 
     os.Mkdir(outdir, 0755)
     basename := p.HostInfo.System.Hostname
@@ -292,7 +278,49 @@ func (p *ClusterStats) OutputBSON() (string, []byte, error) {
         i++
     }
 
-    if err = gox.OutputGzipped(data, ofile); err != nil {
+    databases := p.Databases
+    p.Databases = nil
+    var summaries []Database
+    for _, db := range *databases {
+        dbSummary := Database{
+            Name: db.Name,
+            SizeOnDisk: db.SizeOnDisk,
+            Empty: db.Empty,
+            Shards: db.Shards,
+            Stats: db.Stats}
+        summaries = append(summaries, dbSummary)
+    }
+    p.Databases = &summaries
+    var buffer bytes.Buffer
+    if data, err = bson.Marshal(p); err != nil {
+        return ofile, data, err
+    }
+    nw := 0
+    var n int
+    for nw < len(data) {
+        if n, err = buffer.Write(data); err != nil {
+            return ofile, data, err
+        }
+        nw += n
+    }
+
+    for _, db := range *databases {
+        for _, coll := range db.Collections {
+            if data, err = bson.Marshal(coll); err != nil {
+                return ofile, data, err
+            }
+            nw := 0
+            var n int
+            for nw < len(data) {
+                if n, err = buffer.Write(data); err != nil {
+                    return ofile, data, err
+                }
+                nw += n
+            }
+        }
+    }
+
+    if err = gox.OutputGzipped(buffer.Bytes(), ofile); err != nil {
         return ofile, data, err
     }
     fmt.Printf("bson data written to %v\n", ofile)
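With this change OutputBSON no longer marshals the entire ClusterStats, databases and collections included, into a single BSON document (the deleted block above retried with a truncated collection list when that single marshal failed). Instead it writes one slimmed-down database summary document followed by one document per collection, concatenated into a buffer and gzipped, which appears to be the "multi bsons" layout the commit title refers to. Since every BSON document starts with its own 4-byte little-endian length, the file can be split back into documents with no extra framing. The reader below is only a sketch of how such a file could be consumed; it is not part of the commit, the file path is made up, and it assumes gox.OutputGzipped emits a standard gzip stream (as the .bson.gz name suggests):

package main

import (
    "compress/gzip"
    "encoding/binary"
    "fmt"
    "io"
    "os"

    "go.mongodb.org/mongo-driver/bson"
)

// readMultiBSON splits a gzipped stream of concatenated BSON documents
// (one cluster summary followed by one document per collection) using the
// 4-byte little-endian length prefix that every BSON document carries.
func readMultiBSON(filename string) ([]bson.M, error) {
    f, err := os.Open(filename)
    if err != nil {
        return nil, err
    }
    defer f.Close()
    gz, err := gzip.NewReader(f)
    if err != nil {
        return nil, err
    }
    defer gz.Close()

    var docs []bson.M
    for {
        head := make([]byte, 4)
        if _, err := io.ReadFull(gz, head); err == io.EOF {
            break // clean end of the concatenated stream
        } else if err != nil {
            return nil, err
        }
        size := binary.LittleEndian.Uint32(head) // total document size, prefix included
        buf := make([]byte, size)
        copy(buf, head)
        if _, err := io.ReadFull(gz, buf[4:]); err != nil {
            return nil, err
        }
        var doc bson.M
        if err := bson.Unmarshal(buf, &doc); err != nil {
            return nil, err
        }
        docs = append(docs, doc)
    }
    return docs, nil
}

func main() {
    // hypothetical output path; OutputBSON derives the real name from the hostname
    docs, err := readMultiBSON("out/hostname-stats.bson.gz")
    if err != nil {
        fmt.Println("read error:", err)
        return
    }
    fmt.Printf("decoded %d BSON documents (1 summary + collections)\n", len(docs))
}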
mdb/cluster_stats_test.go (6 changes: 3 additions & 3 deletions)
@@ -30,10 +30,10 @@ func TestOutputBSON(t *testing.T) {
     }
 
     // test bson.Marshal() overflow
-    // for index := range stats.Databases {
+    // for index, db := range *stats.Databases {
     //     for i := 0; i < 12; i++ {
-    //         colls := stats.Databases[index].Collections
-    //         stats.Databases[index].Collections = append(stats.Databases[index].Collections, colls...)
+    //         colls := db.Collections
+    //         (*stats.Databases)[index].Collections = append(db.Collections, colls...)
     //     }
     // }
 
mdb/databases_stats.go (2 changes: 1 addition & 1 deletion)
@@ -305,7 +305,7 @@ func (p *DatabaseStats) collectChunksDistribution(client *mongo.Client, shard st
     var mu sync.Mutex
     coll := client.Database("config").Collection("collections")
     if err = coll.FindOne(ctx, bson.D{{Key: "_id", Value: ns}, {Key: "dropped", Value: bson.M{"$ne": true}}}).Decode(&doc); err != nil {
-        return chunk, err
+        return chunk, nil
     }
     for _, v := range doc {
         if v.Key == "key" {
version (2 changes: 1 addition & 1 deletion)
@@ -1 +1 @@
-1.2.7
+1.3.0
