
feat(monitor): add cluster display name in overview interface
wangao1236 authored and tke-robot committed Sep 8, 2020
1 parent c706d8d commit 047aa28
Showing 8 changed files with 336 additions and 269 deletions.
1 change: 1 addition & 0 deletions api/monitor/types.go
@@ -228,6 +228,7 @@ type ClusterOverviewResult struct {
 
 type ClusterStatistic struct {
 	ClusterID                string
+	ClusterDisplayName       string
 	TenantID                 string
 	ClusterPhase             string
 	NodeCount                int32
419 changes: 231 additions & 188 deletions api/monitor/v1/generated.pb.go

Large diffs are not rendered by default.

56 changes: 29 additions & 27 deletions api/monitor/v1/generated.proto

Some generated files are not rendered by default.

55 changes: 28 additions & 27 deletions api/monitor/v1/types.go
@@ -227,33 +227,34 @@ type ClusterOverviewResult struct {
 
 type ClusterStatistic struct {
 	ClusterID                string  `json:"clusterID" protobuf:"bytes,1,opt,name=clusterID"`
-	TenantID                 string  `json:"tenantID" protobuf:"bytes,2,opt,name=tenantID"`
-	ClusterPhase             string  `json:"clusterPhase" protobuf:"bytes,3,opt,name=clusterPhase"`
-	NodeCount                int32   `json:"nodeCount" protobuf:"bytes,4,opt,name=nodeCount"`
-	NodeAbnormal             int32   `json:"nodeAbnormal" protobuf:"bytes,5,opt,name=nodeAbnormal"`
-	WorkloadCount            int32   `json:"workloadCount" protobuf:"bytes,6,opt,name=workloadCount"`
-	WorkloadAbnormal         int32   `json:"workloadAbnormal" protobuf:"bytes,7,opt,name=workloadAbnormal"`
-	HasMetricServer          bool    `json:"hasMetricServer" protobuf:"bytes,8,opt,name=hasMetricServer"`
-	CPUUsed                  float64 `json:"cpuUsed" protobuf:"bytes,9,opt,name=cpuUsed"`
-	CPURequest               float64 `json:"cpuRequest" protobuf:"bytes,10,opt,name=cpuRequest"`
-	CPULimit                 float64 `json:"cpuLimit" protobuf:"bytes,11,opt,name=cpuLimit"`
-	CPUCapacity              float64 `json:"cpuCapacity" protobuf:"bytes,12,opt,name=cpuCapacity"`
-	CPUAllocatable           float64 `json:"cpuAllocatable" protobuf:"bytes,13,opt,name=cpuAllocatable"`
-	CPURequestRate           string  `json:"cpuRequestRate" protobuf:"bytes,14,opt,name=cpuRequestRate"`
-	CPUAllocatableRate       string  `json:"cpuAllocatableRate" protobuf:"bytes,15,opt,name=cpuAllocatableRate"`
-	CPUUsage                 string  `json:"cpuUsage" protobuf:"bytes,16,opt,name=cpuUsage"`
-	MemUsed                  int64   `json:"memUsed" protobuf:"bytes,17,opt,name=memUsed"`
-	MemRequest               int64   `json:"memRequest" protobuf:"bytes,18,opt,name=memRequest"`
-	MemLimit                 int64   `json:"memLimit" protobuf:"bytes,19,opt,name=memLimit"`
-	MemCapacity              int64   `json:"memCapacity" protobuf:"bytes,20,opt,name=memCapacity"`
-	MemAllocatable           int64   `json:"memAllocatable" protobuf:"bytes,21,opt,name=memAllocatable"`
-	MemRequestRate           string  `json:"memRequestRate" protobuf:"bytes,22,opt,name=memRequestRate"`
-	MemAllocatableRate       string  `json:"memAllocatableRate" protobuf:"bytes,23,opt,name=memAllocatableRate"`
-	MemUsage                 string  `json:"memUsage" protobuf:"bytes,24,opt,name=memUsage"`
-	PodCount                 int32   `json:"podCount" protobuf:"bytes,25,opt,name=podCount"`
-	SchedulerHealthy         bool    `json:"schedulerHealthy" protobuf:"bytes,26,opt,name=schedulerHealthy"`
-	ControllerManagerHealthy bool    `json:"controllerManagerHealthy" protobuf:"bytes,27,opt,name=controllerManagerHealthy"`
-	EtcdHealthy              bool    `json:"etcdHealthy" protobuf:"bytes,28,opt,name=etcdHealthy"`
+	ClusterDisplayName       string  `json:"clusterDisplayName" protobuf:"bytes,2,opt,name=clusterDisplayName"`
+	TenantID                 string  `json:"tenantID" protobuf:"bytes,3,opt,name=tenantID"`
+	ClusterPhase             string  `json:"clusterPhase" protobuf:"bytes,4,opt,name=clusterPhase"`
+	NodeCount                int32   `json:"nodeCount" protobuf:"bytes,5,opt,name=nodeCount"`
+	NodeAbnormal             int32   `json:"nodeAbnormal" protobuf:"bytes,6,opt,name=nodeAbnormal"`
+	WorkloadCount            int32   `json:"workloadCount" protobuf:"bytes,7,opt,name=workloadCount"`
+	WorkloadAbnormal         int32   `json:"workloadAbnormal" protobuf:"bytes,8,opt,name=workloadAbnormal"`
+	HasMetricServer          bool    `json:"hasMetricServer" protobuf:"bytes,9,opt,name=hasMetricServer"`
+	CPUUsed                  float64 `json:"cpuUsed" protobuf:"bytes,10,opt,name=cpuUsed"`
+	CPURequest               float64 `json:"cpuRequest" protobuf:"bytes,11,opt,name=cpuRequest"`
+	CPULimit                 float64 `json:"cpuLimit" protobuf:"bytes,12,opt,name=cpuLimit"`
+	CPUCapacity              float64 `json:"cpuCapacity" protobuf:"bytes,13,opt,name=cpuCapacity"`
+	CPUAllocatable           float64 `json:"cpuAllocatable" protobuf:"bytes,14,opt,name=cpuAllocatable"`
+	CPURequestRate           string  `json:"cpuRequestRate" protobuf:"bytes,15,opt,name=cpuRequestRate"`
+	CPUAllocatableRate       string  `json:"cpuAllocatableRate" protobuf:"bytes,16,opt,name=cpuAllocatableRate"`
+	CPUUsage                 string  `json:"cpuUsage" protobuf:"bytes,17,opt,name=cpuUsage"`
+	MemUsed                  int64   `json:"memUsed" protobuf:"bytes,18,opt,name=memUsed"`
+	MemRequest               int64   `json:"memRequest" protobuf:"bytes,19,opt,name=memRequest"`
+	MemLimit                 int64   `json:"memLimit" protobuf:"bytes,20,opt,name=memLimit"`
+	MemCapacity              int64   `json:"memCapacity" protobuf:"bytes,21,opt,name=memCapacity"`
+	MemAllocatable           int64   `json:"memAllocatable" protobuf:"bytes,22,opt,name=memAllocatable"`
+	MemRequestRate           string  `json:"memRequestRate" protobuf:"bytes,23,opt,name=memRequestRate"`
+	MemAllocatableRate       string  `json:"memAllocatableRate" protobuf:"bytes,24,opt,name=memAllocatableRate"`
+	MemUsage                 string  `json:"memUsage" protobuf:"bytes,25,opt,name=memUsage"`
+	PodCount                 int32   `json:"podCount" protobuf:"bytes,26,opt,name=podCount"`
+	SchedulerHealthy         bool    `json:"schedulerHealthy" protobuf:"bytes,27,opt,name=schedulerHealthy"`
+	ControllerManagerHealthy bool    `json:"controllerManagerHealthy" protobuf:"bytes,28,opt,name=controllerManagerHealthy"`
+	EtcdHealthy              bool    `json:"etcdHealthy" protobuf:"bytes,29,opt,name=etcdHealthy"`
 }
 
 // +genclient
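Reviewer note: adding ClusterDisplayName as protobuf field 2 renumbers every later field (TenantID moves from tag 2 to 3, and so on through EtcdHealthy at 29). Renumbering changes the wire format, so old and new binaries cannot exchange this message; presumably that is acceptable here because the client and server stubs are regenerated together in this commit. A minimal, self-contained sketch of the hazard, using the real google.golang.org/protobuf/encoding/protowire package (the scenario is illustrative, not code from this repository):

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	// An old writer encodes the tenant ID, which used to be field 2.
	var buf []byte
	buf = protowire.AppendTag(buf, 2, protowire.BytesType)
	buf = protowire.AppendString(buf, "tenant-a")

	// A new reader interprets field 2 as clusterDisplayName.
	num, typ, n := protowire.ConsumeTag(buf)
	val, _ := protowire.ConsumeString(buf[n:])
	fmt.Printf("field %d (wire type %d): %q\n", num, typ, val)
	// Prints: field 2 (wire type 2): "tenant-a"
	// The old tenant ID lands in the new schema's display-name slot.
}

The backward-compatible alternative would be to append the new field with the next unused tag (29) instead of renumbering.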
2 changes: 2 additions & 0 deletions api/monitor/v1/zz_generated.conversion.go

Some generated files are not rendered by default.
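The two added lines are not rendered, but for a conversion-gen repository like this one they presumably amount to a plain string copy in each direction between the versioned and internal ClusterStatistic types (the real generated functions follow the autoConvert_v1_ClusterStatistic_To_monitor_ClusterStatistic naming convention). A hand-written, runnable analog with the types trimmed to the relevant fields:

package main

import "fmt"

// Trimmed stand-in for the versioned (v1) type.
type V1ClusterStatistic struct {
	ClusterID          string
	ClusterDisplayName string
}

// Trimmed stand-in for the internal type.
type ClusterStatistic struct {
	ClusterID          string
	ClusterDisplayName string
}

// Analog of the generated v1-to-internal conversion.
func convertV1ToInternal(in *V1ClusterStatistic, out *ClusterStatistic) {
	out.ClusterID = in.ClusterID
	out.ClusterDisplayName = in.ClusterDisplayName // the newly generated copy
}

func main() {
	in := &V1ClusterStatistic{ClusterID: "cls-a", ClusterDisplayName: "prod-a"}
	var out ClusterStatistic
	convertV1ToInternal(in, &out)
	fmt.Printf("%+v\n", out) // {ClusterID:cls-a ClusterDisplayName:prod-a}
}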

8 changes: 7 additions & 1 deletion api/openapi/zz_generated.openapi.go

Some generated files are not rendered by default.

9 changes: 5 additions & 4 deletions pkg/monitor/registry/overview/cluster/storage/storage.go
@@ -30,6 +30,7 @@ import (
 	businessversionedclient "tkestack.io/tke/api/client/clientset/versioned/typed/business/v1"
 	platformversionedclient "tkestack.io/tke/api/client/clientset/versioned/typed/platform/v1"
 	"tkestack.io/tke/api/monitor"
+	platformv1 "tkestack.io/tke/api/platform/v1"
 	"tkestack.io/tke/pkg/apiserver/authentication"
 	apiserverutil "tkestack.io/tke/pkg/apiserver/util"
 	"tkestack.io/tke/pkg/monitor/util/cache"
@@ -89,13 +90,13 @@ func (r *REST) Create(ctx context.Context, obj runtime.Object, _ rest.ValidateOb
 	log.Infof("create cluster overview: %+v, tenantID: %+v, wrappedOptions: %+v",
 		listOptions, tenantID, wrappedOptions)
 
-	clusterIDs := make([]string, 0)
+	clusters := make([]*platformv1.Cluster, 0)
 	if clusterList, err := r.platformClient.Clusters().List(ctx, listOptions); err == nil && clusterList != nil {
-		for _, cls := range clusterList.Items {
-			clusterIDs = append(clusterIDs, cls.GetName())
+		for i := 0; i < len(clusterList.Items); i++ {
+			clusters = append(clusters, &clusterList.Items[i])
 		}
 	}
-	clusterOverview.Result = r.cacher.GetClusterOverviewResult(clusterIDs)
+	clusterOverview.Result = r.cacher.GetClusterOverviewResult(clusters)
 
 	if r.businessClient == nil {
 		log.Info("The client for Business API Server is not installed")
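The index-based loop here is not just stylistic. With for _, cls := range clusterList.Items, appending &cls would store the address of the single reused range variable, so every entry would end up pointing at the last cluster (Go versions before 1.22, which this commit long predates, reuse that variable across iterations); &clusterList.Items[i] takes a distinct element address each pass. A minimal, self-contained sketch of the pitfall (the Cluster type is made up for illustration):

package main

import "fmt"

type Cluster struct{ Name string }

func main() {
	items := []Cluster{{"cls-a"}, {"cls-b"}, {"cls-c"}}

	var wrong []*Cluster
	for _, cls := range items { // one variable, rewritten each iteration
		wrong = append(wrong, &cls) // every element aliases that variable
	}

	var right []*Cluster
	for i := 0; i < len(items); i++ {
		right = append(right, &items[i]) // distinct element addresses
	}

	fmt.Println(wrong[0].Name, wrong[2].Name) // pre-Go 1.22: cls-c cls-c
	fmt.Println(right[0].Name, right[2].Name) // cls-a cls-c
}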
55 changes: 33 additions & 22 deletions pkg/monitor/util/cache/cache.go
@@ -49,12 +49,13 @@ type Component string
 const (
 	AllNamespaces = ""
 
-	ClusterClientSet = "ClusterClientSet"
-	WorkloadCounter  = "WorkloadCounter"
-	ResourceCounter  = "ResourceCounter"
-	ClusterPhase     = "ClusterPhase"
-	TenantID         = "TenantID"
-	ComponentHealth  = "ComponentHealth"
+	ClusterClientSet   = "ClusterClientSet"
+	WorkloadCounter    = "WorkloadCounter"
+	ResourceCounter    = "ResourceCounter"
+	ClusterPhase       = "ClusterPhase"
+	TenantID           = "TenantID"
+	ClusterDisplayName = "ClusterDisplayName"
+	ComponentHealth    = "ComponentHealth"
 
 	TAppResourceName = "tapps"
 	TAppGroupName    = "apps.tkestack.io"
@@ -86,7 +87,7 @@ var (
 
 type Cacher interface {
 	Reload()
-	GetClusterOverviewResult(clusterIDs []string) *monitor.ClusterOverviewResult
+	GetClusterOverviewResult(clusters []*platformv1.Cluster) *monitor.ClusterOverviewResult
 }
 
 type cacher struct {
@@ -139,11 +140,13 @@ func (c *cacher) getClusters() {
 					finished, allTask, cls.GetName(), time.Since(started).Seconds())
 			}()
 			clusterID := cls.GetName()
+			clusterDisplayName := cls.Spec.DisplayName
 			tenantID := cls.Spec.TenantID
 			if cls.Status.Phase != platformv1.ClusterRunning {
 				syncMap.Store(clusterID, map[string]interface{}{
-					ClusterPhase: string(cls.Status.Phase),
-					TenantID:     tenantID,
+					ClusterDisplayName: clusterDisplayName,
+					ClusterPhase:       string(cls.Status.Phase),
+					TenantID:           tenantID,
 				})
 				return
 			}
@@ -163,12 +166,13 @@
 			health := &util.ComponentHealth{}
 			c.getComponentStatuses(clusterID, clientSet, health)
 			syncMap.Store(clusterID, map[string]interface{}{
-				ClusterClientSet: clientSet,
-				WorkloadCounter:  workloadCounter,
-				ResourceCounter:  resourceCounter,
-				ClusterPhase:     string(cls.Status.Phase),
-				TenantID:         tenantID,
-				ComponentHealth:  health,
+				ClusterClientSet:   clientSet,
+				WorkloadCounter:    workloadCounter,
+				ResourceCounter:    resourceCounter,
+				ClusterPhase:       string(cls.Status.Phase),
+				ClusterDisplayName: clusterDisplayName,
+				TenantID:           tenantID,
+				ComponentHealth:    health,
 			})
 		}(clusters.Items[i])
 	}
@@ -182,6 +186,7 @@ func (c *cacher) getClusters() {
 	syncMap.Range(func(key, value interface{}) bool {
 		clusterID := key.(string)
 		val := value.(map[string]interface{})
+		clusterDisplayName := val[ClusterDisplayName].(string)
 		tenantID := val[TenantID].(string)
 		clusterPhase := val[ClusterPhase].(string)
 		if clusterPhase == string(platformv1.ClusterRunning) {
@@ -192,6 +197,7 @@
 			c.clusterClientSets[clusterID] = clusterClientSet
 			c.clusterStatisticSet[clusterID] = &monitor.ClusterStatistic{
 				ClusterID:                clusterID,
+				ClusterDisplayName:       clusterDisplayName,
 				TenantID:                 tenantID,
 				ClusterPhase:             clusterPhase,
 				NodeCount:                int32(resourceCounter.NodeTotal),
@@ -222,9 +228,10 @@ func (c *cacher) getClusters() {
 			}
 		} else {
 			c.clusterStatisticSet[clusterID] = &monitor.ClusterStatistic{
-				ClusterID:    clusterID,
-				ClusterPhase: clusterPhase,
-				TenantID:     tenantID,
+				ClusterID:          clusterID,
+				ClusterDisplayName: clusterDisplayName,
+				ClusterPhase:       clusterPhase,
+				TenantID:           tenantID,
 			}
 		}
 		return true
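For orientation: getClusters fans out one goroutine per cluster, each storing its collected attributes in a sync.Map keyed by cluster ID, and a single Range pass afterwards builds clusterStatisticSet, which is why the display name has to be stored under its own key in both Store branches above. A condensed, runnable sketch of the pattern, with the worker body reduced to just the display name (names are illustrative):

package main

import (
	"fmt"
	"sync"
)

func main() {
	clusters := map[string]string{"cls-a": "prod", "cls-b": "staging"}

	var syncMap sync.Map
	var wg sync.WaitGroup
	for id, name := range clusters {
		wg.Add(1)
		go func(id, name string) { // params avoid capturing loop variables
			defer wg.Done()
			syncMap.Store(id, map[string]interface{}{
				"ClusterDisplayName": name,
			})
		}(id, name)
	}
	wg.Wait()

	// Single-threaded collection pass, as in the Range above.
	syncMap.Range(func(key, value interface{}) bool {
		attrs := value.(map[string]interface{})
		fmt.Println(key.(string), attrs["ClusterDisplayName"].(string))
		return true
	})
}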
@@ -254,20 +261,24 @@ func (c *cacher) getMetricServerClientSet(ctx context.Context, cls *platformv1.C
 func (c *cacher) getProjects() {
 }
 
-func (c *cacher) GetClusterOverviewResult(clusterIDs []string) *monitor.ClusterOverviewResult {
+func (c *cacher) GetClusterOverviewResult(clusters []*platformv1.Cluster) *monitor.ClusterOverviewResult {
 	if atomic.LoadInt32(&c.firstLoad) == FirstLoad {
 		c.RLock()
 		defer c.RUnlock()
 	}
 
 	clusterStatistics := make([]*monitor.ClusterStatistic, 0)
 	result := &monitor.ClusterOverviewResult{}
-	result.ClusterCount = int32(len(clusterIDs))
+	result.ClusterCount = int32(len(clusters))
 	result.ClusterAbnormal = int32(c.clusterAbnormal)
 	result.NodeAbnormal = 0
 	result.WorkloadAbnormal = 0
-	for _, clusterID := range clusterIDs {
-		if clusterStatistic, ok := c.clusterStatisticSet[clusterID]; ok {
+	for i := 0; i < len(clusters); i++ {
+		cls := clusters[i]
+		if clusterStatistic, ok := c.clusterStatisticSet[cls.GetName()]; ok {
+			if clusterStatistic.ClusterDisplayName != cls.Spec.DisplayName && len(cls.Spec.DisplayName) > 0 {
+				clusterStatistic.ClusterDisplayName = cls.Spec.DisplayName
+			}
 			result.NodeCount += clusterStatistic.NodeCount
 			result.NodeAbnormal += clusterStatistic.NodeAbnormal
 			result.WorkloadCount += clusterStatistic.WorkloadCount
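One subtlety in the new GetClusterOverviewResult: clusterStatisticSet comes from a cache that is only rebuilt on Reload, so a cluster renamed since the last reload would otherwise report a stale display name. The method therefore prefers the display name from the freshly listed cluster, but only when it is non-empty, so an unset spec field does not blank out the cached value. A condensed sketch of just that override (types trimmed to the fields involved):

package main

import "fmt"

type ClusterStatistic struct {
	ClusterID          string
	ClusterDisplayName string
}

// Prefer the display name from the freshly listed cluster over the cached
// one, but never overwrite with an empty string.
func refreshDisplayName(cached *ClusterStatistic, fresh string) {
	if cached.ClusterDisplayName != fresh && len(fresh) > 0 {
		cached.ClusterDisplayName = fresh
	}
}

func main() {
	stat := &ClusterStatistic{ClusterID: "cls-a", ClusterDisplayName: "old-name"}
	refreshDisplayName(stat, "new-name")
	fmt.Println(stat.ClusterDisplayName) // new-name

	refreshDisplayName(stat, "") // empty spec field: cached value kept
	fmt.Println(stat.ClusterDisplayName) // new-name
}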
