Skip to content

Commit

Permalink
feat: enhance quota management
Browse files Browse the repository at this point in the history
  • Loading branch information
flyma authored and choujimmy committed Mar 6, 2020
1 parent 15036fc commit 5cb0d1b
Show file tree
Hide file tree
Showing 16 changed files with 624 additions and 114 deletions.
4 changes: 4 additions & 0 deletions api/business/types.go
Original file line number Diff line number Diff line change
Expand Up @@ -85,6 +85,8 @@ type ProjectStatus struct {
CalculatedChildProjects []string
// +optional
CalculatedNamespaces []string
// +optional
CachedSpecClusters ClusterHard
}

// ProjectPhase defines the phase of project constructor.
Expand Down Expand Up @@ -191,6 +193,8 @@ type NamespaceStatus struct {
// Used represents the resources of a namespace that are used.
// +optional
Used ResourceList
// +optional
CachedSpecHard ResourceList
}

// NamespacePhase indicates the status of namespace in project.
Expand Down
564 changes: 464 additions & 100 deletions api/business/v1/generated.pb.go

Large diffs are not rendered by default.

6 changes: 6 additions & 0 deletions api/business/v1/generated.proto

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

4 changes: 4 additions & 0 deletions api/business/v1/types.go
Original file line number Diff line number Diff line change
Expand Up @@ -87,6 +87,8 @@ type ProjectStatus struct {
CalculatedChildProjects []string `json:"calculatedChildProjects,omitempty" protobuf:"bytes,4,rep,name=calculatedChildProjects"`
// +optional
CalculatedNamespaces []string `json:"calculatedNamespaces,omitempty" protobuf:"bytes,5,rep,name=calculatedNamespaces"`
// +optional
CachedSpecClusters ClusterHard `json:"cachedSpecClusters,omitempty" protobuf:"bytes,6,rep,name=cachedSpecClusters,casttype=ClusterHard"`
}

// ProjectPhase defines the phase of project constructor.
Expand Down Expand Up @@ -193,6 +195,8 @@ type NamespaceStatus struct {
// Used represents the resources of a namespace that are used.
// +optional
Used ResourceList `json:"used,omitempty" protobuf:"bytes,6,rep,name=used,casttype=ResourceList"`
// +optional
CachedSpecHard ResourceList `json:"cachedSpecHard,omitempty" protobuf:"bytes,7,rep,name=cachedSpecHard,casttype=ResourceList"`
}

// NamespacePhase indicates the status of namespace in project.
Expand Down
4 changes: 4 additions & 0 deletions api/business/v1/zz_generated.conversion.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

14 changes: 14 additions & 0 deletions api/business/v1/zz_generated.deepcopy.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

14 changes: 14 additions & 0 deletions api/business/zz_generated.deepcopy.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

28 changes: 27 additions & 1 deletion api/openapi/zz_generated.openapi.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Original file line number Diff line number Diff line change
Expand Up @@ -282,7 +282,11 @@ func recalculateProjectUsed(deleter *namespacedResourcesDeleter, namespace *v1.N
if project.Status.Clusters != nil {
clusterUsed, clusterUsedExist := project.Status.Clusters[namespace.Spec.ClusterName]
if clusterUsedExist {
for k, v := range namespace.Spec.Hard {
release := namespace.Spec.Hard
if namespace.Status.CachedSpecHard != nil {
release = namespace.Status.CachedSpecHard
}
for k, v := range release {
usedValue, ok := clusterUsed.Used[k]
if ok {
usedValue.Sub(v)
Expand Down
10 changes: 9 additions & 1 deletion pkg/business/controller/namespace/namespace_cache.go
Original file line number Diff line number Diff line change
Expand Up @@ -34,12 +34,20 @@ type namespaceCache struct {
m map[string]*cachedNamespace
}

func (s *namespaceCache) getOrCreate(name string) *cachedNamespace {
// getOrCreate returns the cached entry for the given key, creating one under
// the cache lock if it does not exist yet.
//
// When a new entry is created and the supplied namespace is already available,
// the entry is seeded from a deep copy of it, with Spec.Hard replaced by the
// last successfully applied quota (Status.CachedSpecHard) when present. This
// way later reconciliation diffs against the quota that was actually charged
// to the project, not against a Spec.Hard edit that may not have been applied
// yet. NOTE(review): assumes self is never nil for phases that reach here —
// confirm against callers.
func (s *namespaceCache) getOrCreate(name string, self *v1.Namespace) *cachedNamespace {
	s.mu.Lock()
	defer s.mu.Unlock()
	namespace, ok := s.m[name]
	if !ok {
		namespace = &cachedNamespace{}
		if self.Status.Phase == v1.NamespaceAvailable {
			// Seed the cache from the live object so a controller restart does
			// not lose track of the quota already accounted in the project.
			namespace.state = self.DeepCopy()
			if self.Status.CachedSpecHard != nil {
				// Prefer the recorded, last-applied quota over the current spec.
				namespace.state.Spec.Hard = self.Status.CachedSpecHard
			} else {
				namespace.state.Spec.Hard = self.Spec.Hard
			}
		}
		s.m[name] = namespace
	}
	return namespace
Expand Down
11 changes: 8 additions & 3 deletions pkg/business/controller/namespace/namespace_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -219,7 +219,7 @@ func (c *Controller) syncItem(key string) error {
log.Warn("Unable to retrieve namespace from store", log.String("projectName", projectName), log.String("namespaceName", namespaceName), log.Err(err))
default:
if namespace.Status.Phase == v1.NamespacePending || namespace.Status.Phase == v1.NamespaceAvailable || namespace.Status.Phase == v1.NamespaceFailed {
cachedNamespace := c.cache.getOrCreate(key)
cachedNamespace := c.cache.getOrCreate(key, namespace)
err = c.processUpdate(cachedNamespace, namespace, key)
} else if namespace.Status.Phase == v1.NamespaceTerminating {
log.Info("Namespace has been terminated. Attempting to cleanup resources", log.String("projectName", projectName), log.String("namespaceName", namespaceName))
Expand Down Expand Up @@ -284,6 +284,7 @@ func (c *Controller) handlePhase(key string, cachedNamespace *cachedNamespace, n
// Since it's pending now, no need to set v1.NamespaceFailed.
return err
}
namespace.Status.CachedSpecHard = namespace.Spec.Hard
if err := c.ensureNamespaceOnCluster(namespace); err != nil {
namespace.Status.Phase = v1.NamespaceFailed
namespace.Status.Message = "ensureNamespaceOnCluster failed"
Expand All @@ -304,13 +305,18 @@ func (c *Controller) handlePhase(key string, cachedNamespace *cachedNamespace, n
namespace.Status.LastTransitionTime = metav1.Now()
return c.persistUpdate(namespace)
}
cachedHard := namespace.Status.CachedSpecHard
namespace.Status.CachedSpecHard = namespace.Spec.Hard
if err := c.ensureNamespaceOnCluster(namespace); err != nil {
namespace.Status.Phase = v1.NamespaceFailed
namespace.Status.Message = "ensureNamespaceOnCluster failed"
namespace.Status.Reason = err.Error()
namespace.Status.LastTransitionTime = metav1.Now()
return c.persistUpdate(namespace)
}
if !reflect.DeepEqual(namespace.Spec.Hard, cachedHard) {
_ = c.persistUpdate(namespace)
}
c.startNamespaceHealthCheck(key)
case v1.NamespaceFailed:
c.startNamespaceHealthCheck(key)
Expand All @@ -337,8 +343,7 @@ func (c *Controller) calculateProjectUsed(cachedNamespace *cachedNamespace, name
},
})
return c.persistUpdateProject(project)
}
if cachedNamespace.state != nil && !reflect.DeepEqual(cachedNamespace.state.Spec.Hard, namespace.Spec.Hard) {
} else if cachedNamespace.state != nil && !reflect.DeepEqual(cachedNamespace.state.Spec.Hard, namespace.Spec.Hard) {
if project.Status.Clusters == nil {
project.Status.Clusters = make(v1.ClusterUsed)
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -287,7 +287,11 @@ func recalculateParentProjectUsed(deleter *projectedResourcesDeleter, project *b
calculatedChildProjectNames.Delete(project.ObjectMeta.Name)
parentProject.Status.CalculatedChildProjects = calculatedChildProjectNames.List()
if parentProject.Status.Clusters != nil {
businessUtil.SubClusterHardFromUsed(&parentProject.Status.Clusters, project.Spec.Clusters)
release := project.Spec.Clusters
if project.Status.CachedSpecClusters != nil {
release = project.Status.CachedSpecClusters
}
businessUtil.SubClusterHardFromUsed(&parentProject.Status.Clusters, release)
}
_, err := deleter.businessClient.Projects().Update(parentProject)
if err != nil {
Expand Down
13 changes: 11 additions & 2 deletions pkg/business/controller/project/project_cache.go
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,8 @@ package project

import (
"sync"
"tkestack.io/tke/api/business/v1"

v1 "tkestack.io/tke/api/business/v1"
)

type cachedProject struct {
Expand All @@ -33,12 +34,20 @@ type projectCache struct {
m map[string]*cachedProject
}

func (s *projectCache) getOrCreate(name string) *cachedProject {
// getOrCreate returns the cached entry for the given key, creating one under
// the cache lock if it does not exist yet.
//
// When a new entry is created and the supplied project is already active, the
// entry is seeded from a deep copy of it, with Spec.Clusters replaced by the
// last successfully applied per-cluster quota (Status.CachedSpecClusters)
// when present, so that later reconciliation diffs against what was actually
// charged to the parent project rather than an unapplied spec edit.
// NOTE(review): assumes self is never nil for phases that reach here —
// confirm against callers.
func (s *projectCache) getOrCreate(name string, self *v1.Project) *cachedProject {
	s.mu.Lock()
	defer s.mu.Unlock()
	project, ok := s.m[name]
	if !ok {
		project = &cachedProject{}
		if self.Status.Phase == v1.ProjectActive {
			// Seed the cache from the live object so a controller restart does
			// not lose track of the quota already accounted upstream.
			project.state = self.DeepCopy()
			if self.Status.CachedSpecClusters != nil {
				// Prefer the recorded, last-applied quota over the current spec.
				project.state.Spec.Clusters = self.Status.CachedSpecClusters
			} else {
				project.state.Spec.Clusters = self.Spec.Clusters
			}
		}
		s.m[name] = project
	}
	return project
Expand Down
44 changes: 39 additions & 5 deletions pkg/business/controller/project/project_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -210,7 +210,7 @@ func (c *Controller) syncItem(key string) error {
log.Warn("Unable to retrieve project from store", log.String("projectName", key), log.Err(err))
default:
if project.Status.Phase == v1.ProjectActive {
cachedProject = c.cache.getOrCreate(key)
cachedProject = c.cache.getOrCreate(key, project)
err = c.processUpdate(cachedProject, project, key)
} else if project.Status.Phase == v1.ProjectTerminating {
log.Info("Project has been terminated. Attempting to cleanup resources", log.String("projectName", key))
Expand Down Expand Up @@ -277,17 +277,23 @@ func (c *Controller) handlePhase(key string, cachedProject *cachedProject, proje
parentProject.Status.Clusters = make(v1.ClusterUsed)
}
businessUtil.AddClusterHardToUsed(&parentProject.Status.Clusters, project.Spec.Clusters)
return c.persistUpdate(parentProject)
}
if cachedProject.state != nil && !reflect.DeepEqual(cachedProject.state.Spec.Clusters, project.Spec.Clusters) {
if err := c.persistUpdate(parentProject); err != nil {
return err
}
} else if cachedProject.state != nil && !reflect.DeepEqual(cachedProject.state.Spec.Clusters, project.Spec.Clusters) {
if parentProject.Status.Clusters == nil {
parentProject.Status.Clusters = make(v1.ClusterUsed)
}
// sub old
businessUtil.SubClusterHardFromUsed(&parentProject.Status.Clusters, cachedProject.state.Spec.Clusters)
// add new
businessUtil.AddClusterHardToUsed(&parentProject.Status.Clusters, project.Spec.Clusters)
return c.persistUpdate(parentProject)
if err := c.persistUpdate(parentProject); err != nil {
return err
}
}
if project != nil && !reflect.DeepEqual(project.Spec.Clusters, project.Status.CachedSpecClusters) {
return c.updateCache(project, project.Spec.Clusters)
}
}
return nil
Expand All @@ -312,3 +318,31 @@ func (c *Controller) persistUpdate(project *v1.Project) error {
}
return err
}

// updateCache persists newCache into the project's Status.CachedSpecClusters,
// retrying up to clientRetryCount times on transient failures.
//
// A NotFound error is treated as success (the project is gone, nothing to
// record). On a Conflict the project is re-fetched and the cache value is
// re-applied to the fresh object before the next attempt. Note that project
// is mutated in place, and on a conflict the local pointer is rebased onto
// the freshly fetched object.
//
// Returns nil on success or when the project no longer exists; otherwise the
// last error after all retries are exhausted.
func (c *Controller) updateCache(project *v1.Project, newCache v1.ClusterHard) error {
	var err error
	project.Status.CachedSpecClusters = newCache
	for i := 0; i < clientRetryCount; i++ {
		_, err = c.client.BusinessV1().Projects().UpdateStatus(project)
		if err == nil {
			return nil
		}
		if errors.IsNotFound(err) {
			log.Info(fmt.Sprintf("Not updateCache of non-existed project %s", project.ObjectMeta.Name), log.Err(err))
			return nil
		}
		if errors.IsConflict(err) {
			// Rebase onto the latest object so the next UpdateStatus does not
			// fail on the same stale resourceVersion.
			newProject, newErr := c.client.BusinessV1().Projects().Get(project.ObjectMeta.Name, metav1.GetOptions{})
			if newErr == nil {
				project = newProject
				project.Status.CachedSpecClusters = newCache
			} else {
				log.Warn(fmt.Sprintf("Failed to get project %s", project.ObjectMeta.Name), log.Err(newErr))
			}
		}
		log.Warn(fmt.Sprintf("Failed to updateCache of project %s", project.ObjectMeta.Name), log.Err(err))
		// Don't burn a full retry interval after the final attempt has
		// already failed — return promptly instead.
		if i < clientRetryCount-1 {
			time.Sleep(clientRetryInterval)
		}
	}
	log.Error(fmt.Sprintf("Failed to updateCache of project %s", project.ObjectMeta.Name), log.Err(err))
	return err
}
5 changes: 5 additions & 0 deletions pkg/business/registry/namespace/strategy.go
Original file line number Diff line number Diff line change
Expand Up @@ -71,6 +71,11 @@ func (Strategy) PrepareForUpdate(ctx context.Context, obj, old runtime.Object) {
}
namespace.Spec.TenantID = tenantID
}
if oldNamespace.Status.CachedSpecHard != nil {
namespace.Status.CachedSpecHard = oldNamespace.Status.CachedSpecHard
} else {
namespace.Status.CachedSpecHard = oldNamespace.Spec.Hard
}
}

// NamespaceScoped is false for namespaces.
Expand Down
Loading

0 comments on commit 5cb0d1b

Please sign in to comment.