Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat(ui): Pod view #5091

Merged
merged 6 commits into from
Jan 5, 2021
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
394 changes: 394 additions & 0 deletions assets/swagger.json

Large diffs are not rendered by default.

12 changes: 12 additions & 0 deletions controller/cache/info.go
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
resourcehelper "k8s.io/kubernetes/pkg/api/v1/resource"
k8snode "k8s.io/kubernetes/pkg/util/node"

"github.com/argoproj/argo-cd/common"
Expand Down Expand Up @@ -311,6 +312,17 @@ func populatePodInfo(un *unstructured.Unstructured, res *ResourceInfo) {
if reason != "" {
res.Info = append(res.Info, v1alpha1.InfoItem{Name: "Status Reason", Value: reason})
}

req, limit := resourcehelper.PodRequestsAndLimits(&pod)
cpuReq, cpuLimit, memoryReq, memoryLimit := req[v1.ResourceCPU], limit[v1.ResourceCPU], req[v1.ResourceMemory], limit[v1.ResourceMemory]

res.Info = append(res.Info, v1alpha1.InfoItem{Name: "Node", Value: pod.Spec.NodeName})

res.Info = append(res.Info, v1alpha1.InfoItem{Name: "Resource.CpuReq", Value: fmt.Sprintf("%d", cpuReq.MilliValue())})
res.Info = append(res.Info, v1alpha1.InfoItem{Name: "Resource.CpuLimit", Value: fmt.Sprintf("%d", cpuLimit.MilliValue())})
res.Info = append(res.Info, v1alpha1.InfoItem{Name: "Resource.MemoryReq", Value: fmt.Sprintf("%d", memoryReq.Value())})
res.Info = append(res.Info, v1alpha1.InfoItem{Name: "Resource.MemoryLimit", Value: fmt.Sprintf("%d", memoryLimit.Value())})

res.Info = append(res.Info, v1alpha1.InfoItem{Name: "Containers", Value: fmt.Sprintf("%d/%d", readyContainers, totalContainers)})
res.NetworkingInfo = &v1alpha1.ResourceNetworkingInfo{Labels: un.GetLabels()}
}
9 changes: 8 additions & 1 deletion controller/cache/info_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -176,7 +176,14 @@ func TestGetPodInfo(t *testing.T) {

info := &ResourceInfo{}
populateNodeInfo(pod, info)
assert.Equal(t, []v1alpha1.InfoItem{{Name: "Containers", Value: "0/1"}}, info.Info)
assert.Equal(t, []v1alpha1.InfoItem{
{Name: "Node", Value: ""},
{Name: "Resource.CpuReq", Value: "0"},
{Name: "Resource.CpuLimit", Value: "0"},
{Name: "Resource.MemoryReq", Value: "0"},
{Name: "Resource.MemoryLimit", Value: "0"},
{Name: "Containers", Value: "0/1"},
}, info.Info)
assert.Equal(t, []string{"bar"}, info.Images)
assert.Equal(t, &v1alpha1.ResourceNetworkingInfo{Labels: map[string]string{"app": "guestbook"}}, info.NetworkingInfo)
}
Expand Down
551 changes: 389 additions & 162 deletions pkg/apiclient/application/application.pb.go

Large diffs are not rendered by default.

101 changes: 101 additions & 0 deletions pkg/apiclient/application/application.pb.gw.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

57 changes: 57 additions & 0 deletions server/application/application.go
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,8 @@ import (
"strings"
"time"

listers "k8s.io/client-go/listers/core/v1"

"github.com/Masterminds/semver"
"github.com/argoproj/gitops-engine/pkg/diff"
"github.com/argoproj/gitops-engine/pkg/sync/common"
Expand Down Expand Up @@ -80,6 +82,7 @@ type Server struct {
settingsMgr *settings.SettingsManager
cache *servercache.Cache
projInformer cache.SharedIndexInformer
nodeLister listers.NodeLister
}

// NewServer returns a new instance of the Application service
Expand All @@ -97,6 +100,7 @@ func NewServer(
projectLock sync.KeyLock,
settingsMgr *settings.SettingsManager,
projInformer cache.SharedIndexInformer,
nodeLister listers.NodeLister,
) application.ApplicationServiceServer {
appBroadcaster := &broadcasterHandler{}
appInformer.AddEventHandler(appBroadcaster)
Expand All @@ -116,6 +120,7 @@ func NewServer(
auditLogger: argo.NewAuditLogger(namespace, kubeclientset, "argocd-server"),
settingsMgr: settingsMgr,
projInformer: projInformer,
nodeLister: nodeLister,
}
}

Expand Down Expand Up @@ -212,6 +217,58 @@ func (s *Server) Create(ctx context.Context, q *application.ApplicationCreateReq
return updated, nil
}

// ListNodes returns the subset of cluster nodes that currently host pods
// belonging to the given application. Only a trimmed projection of each node
// (hostname label, capacity, allocatable, and basic system info) is returned.
func (s *Server) ListNodes(ctx context.Context, q *application.NodeQuery) (*v1.NodeList, error) {
	a, err := s.appLister.Get(*q.Name)
	if err != nil {
		return nil, err
	}
	if err := s.enf.EnforceErr(ctx.Value("claims"), rbacpolicy.ResourceApplications, rbacpolicy.ActionGet, appRBACName(*a)); err != nil {
		return nil, err
	}
	tree, err := s.getAppResources(ctx, a)
	if err != nil {
		return nil, err
	}
	// Collect the node names referenced by the application's resource tree.
	// populatePodInfo records pod.Spec.NodeName under the "Node" info key;
	// unscheduled pods carry an empty value and are skipped so they cannot
	// spuriously match nodes that lack a hostname label.
	nodeRefs := make(map[string]bool)
	for _, node := range tree.Nodes {
		for _, item := range node.Info {
			if item.Name == "Node" && item.Value != "" {
				nodeRefs[item.Value] = true
			}
		}
	}
	nodes, err := s.nodeLister.List(labels.Everything())
	if err != nil {
		return nil, err
	}
	items := make([]v1.Node, 0, len(nodeRefs))
	for _, n := range nodes {
		// Match on the node object name as well as the hostname label:
		// pod.Spec.NodeName holds the node's object name, which usually —
		// but not always — equals the kubernetes.io/hostname label value.
		hostname := n.Labels["kubernetes.io/hostname"]
		if !nodeRefs[n.Name] && !nodeRefs[hostname] {
			continue
		}
		items = append(items, v1.Node{
			ObjectMeta: metav1.ObjectMeta{
				Labels: map[string]string{"kubernetes.io/hostname": hostname},
			},
			Status: v1.NodeStatus{
				Capacity:    n.Status.Capacity,
				Allocatable: n.Status.Allocatable,
				NodeInfo: v1.NodeSystemInfo{
					OperatingSystem: n.Status.NodeInfo.OperatingSystem,
					Architecture:    n.Status.NodeInfo.Architecture,
					KernelVersion:   n.Status.NodeInfo.KernelVersion,
				},
			},
		})
	}
	return &v1.NodeList{
		Items: items,
	}, nil
}

// GetManifests returns application manifests
func (s *Server) GetManifests(ctx context.Context, q *application.ApplicationManifestQuery) (*apiclient.ManifestResponse, error) {
a, err := s.appLister.Get(*q.Name)
Expand Down
10 changes: 10 additions & 0 deletions server/application/application.proto
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,11 @@ message ApplicationQuery {
optional string selector = 5 [(gogoproto.nullable) = false];
}

message NodeQuery {
// the application's name
optional string name = 1;
}

message RevisionMetadataQuery{
// the application's name
required string name = 1;
Expand Down Expand Up @@ -353,4 +358,9 @@ service ApplicationService {
rpc PodLogs(ApplicationPodLogsQuery) returns (stream LogEntry) {
option (google.api.http).get = "/api/v1/applications/{name}/pods/{podName}/logs";
}

// ListNodes returns nodes associated with an application
rpc ListNodes(NodeQuery) returns (k8s.io.api.core.v1.NodeList) {
option (google.api.http).get = "/api/v1/applications/{name}/nodes";
}
}
5 changes: 5 additions & 0 deletions server/application/application_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
kubetesting "k8s.io/client-go/testing"
k8scache "k8s.io/client-go/tools/cache"
Expand Down Expand Up @@ -161,6 +162,9 @@ func newTestAppServer(objects ...runtime.Object) *Server {
panic("Timed out waiting for caches to sync")
}

kfactory := informers.NewSharedInformerFactory(kubeclientset, 0)
nodeLister := kfactory.Core().V1().Nodes().Lister()

server := NewServer(
testNamespace,
kubeclientset,
Expand All @@ -175,6 +179,7 @@ func newTestAppServer(objects ...runtime.Object) *Server {
sync.NewKeyLock(),
settingsMgr,
projInformer,
nodeLister,
)
return server.(*Server)
}
Expand Down
18 changes: 16 additions & 2 deletions server/server.go
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,9 @@ import (
"strings"
"time"

"k8s.io/client-go/informers"
listers "k8s.io/client-go/listers/core/v1"

// nolint:staticcheck
golang_proto "github.com/golang/protobuf/proto"

Expand Down Expand Up @@ -155,6 +158,8 @@ type ArgoCDServer struct {
policyEnforcer *rbacpolicy.RBACPolicyEnforcer
appInformer cache.SharedIndexInformer
appLister applisters.ApplicationNamespaceLister
nodeInformer cache.SharedIndexInformer
nodeLister listers.NodeLister

// stopCh is the channel which when closed, will shutdown the Argo CD server
stopCh chan struct{}
Expand Down Expand Up @@ -218,6 +223,10 @@ func NewServer(ctx context.Context, opts ArgoCDServerOpts) *ArgoCDServer {
appInformer := factory.Argoproj().V1alpha1().Applications().Informer()
appLister := factory.Argoproj().V1alpha1().Applications().Lister().Applications(opts.Namespace)

kfactory := informers.NewSharedInformerFactory(opts.KubeClientset, 0)
nodeInformer := kfactory.Core().V1().Nodes().Informer()
nodeLister := kfactory.Core().V1().Nodes().Lister()

enf := rbac.NewEnforcer(opts.KubeClientset, opts.Namespace, common.ArgoCDRBACConfigMapName, nil)
enf.EnableEnforce(!opts.DisableAuth)
err = enf.SetBuiltinPolicy(assets.BuiltinPolicyCSV)
Expand All @@ -237,6 +246,8 @@ func NewServer(ctx context.Context, opts ArgoCDServerOpts) *ArgoCDServer {
projInformer: projInformer,
appInformer: appInformer,
appLister: appLister,
nodeInformer: nodeInformer,
nodeLister: nodeLister,
policyEnforcer: policyEnf,
}
}
Expand Down Expand Up @@ -337,6 +348,8 @@ func (a *ArgoCDServer) Run(ctx context.Context, port int, metricsPort int) {

go a.projInformer.Run(ctx.Done())
go a.appInformer.Run(ctx.Done())
go a.nodeInformer.Run(ctx.Done())

go func() { a.checkServeErr("grpcS", grpcS.Serve(grpcL)) }()
go func() { a.checkServeErr("httpS", httpS.Serve(httpL)) }()
if a.useTLS() {
Expand All @@ -347,7 +360,7 @@ func (a *ArgoCDServer) Run(ctx context.Context, port int, metricsPort int) {
go a.rbacPolicyLoader(ctx)
go func() { a.checkServeErr("tcpm", tcpm.Serve()) }()
go func() { a.checkServeErr("metrics", metricsServ.ListenAndServe()) }()
if !cache.WaitForCacheSync(ctx.Done(), a.projInformer.HasSynced, a.appInformer.HasSynced) {
if !cache.WaitForCacheSync(ctx.Done(), a.projInformer.HasSynced, a.appInformer.HasSynced, a.nodeInformer.HasSynced) {
log.Fatal("Timed out waiting for project cache to sync")
}

Expand Down Expand Up @@ -556,7 +569,8 @@ func (a *ArgoCDServer) newGRPCServer() *grpc.Server {
a.enf,
projectLock,
a.settingsMgr,
a.projInformer)
a.projInformer,
a.nodeLister)
projectService := project.NewServer(a.Namespace, a.KubeClientset, a.AppClientset, a.enf, projectLock, a.sessionMgr, a.policyEnforcer, a.projInformer, a.settingsMgr)
settingsService := settings.NewServer(a.settingsMgr, a, a.DisableAuth)
accountService := account.NewServer(a.sessionMgr, a.settingsMgr, a.enf)
Expand Down
Loading