Targets

alertmanager (0/1 up)

Endpoint State Labels Last Scrape Scrape Duration Error
http://alertmanager:9093/metrics
down instance="alertmanager:9093" job="alertmanager" 3.958s ago 5.516ms Get "http://alertmanager:9093/metrics": dial tcp: lookup alertmanager on 10.100.0.10:53: no such host

ingress-nginx-endpoints (1/1 up)

Endpoint State Labels Last Scrape Scrape Duration Error
http://172.31.18.67:10254/metrics
up instance="172.31.18.67:10254" job="ingress-nginx-endpoints" 3.545s ago 277.6ms

kubernetes-nodes-cadvisor (12/12 up)

Endpoint State Labels Last Scrape Scrape Duration Error
https://kubernetes.default.svc:443/api/v1/nodes/ip-172-31-0-24.ec2.internal/proxy/metrics/cadvisor
up beta_kubernetes_io_arch="amd64" beta_kubernetes_io_instance_type="t3.medium" beta_kubernetes_io_os="linux" eks_amazonaws_com_capacityType="ON_DEMAND" eks_amazonaws_com_nodegroup="ingress" eks_amazonaws_com_nodegroup_image="ami-0af462231da3bf6f5" failure_domain_beta_kubernetes_io_region="us-east-1" failure_domain_beta_kubernetes_io_zone="us-east-1d" instance="ip-172-31-0-24.ec2.internal" job="kubernetes-nodes-cadvisor" k8s_io_cloud_provider_aws="93e90ce7ba31ff4996cad58315638a91" kubernetes_io_arch="amd64" kubernetes_io_hostname="ip-172-31-0-24.ec2.internal" kubernetes_io_os="linux" node_kubernetes_io_instance_type="t3.medium" tenant="ingress" topology_ebs_csi_aws_com_zone="us-east-1d" topology_k8s_aws_zone_id="use1-az1" topology_kubernetes_io_region="us-east-1" topology_kubernetes_io_zone="us-east-1d" 2.032s ago 63ms
https://kubernetes.default.svc:443/api/v1/nodes/ip-172-31-25-201.ec2.internal/proxy/metrics/cadvisor
up beta_kubernetes_io_arch="amd64" beta_kubernetes_io_instance_type="c3.2xlarge" beta_kubernetes_io_os="linux" eks_amazonaws_com_capacityType="ON_DEMAND" eks_amazonaws_com_nodegroup="prod" eks_amazonaws_com_nodegroup_image="ami-060573ecd3943c6ff" env="production" failure_domain_beta_kubernetes_io_region="us-east-1" failure_domain_beta_kubernetes_io_zone="us-east-1b" instance="ip-172-31-25-201.ec2.internal" job="kubernetes-nodes-cadvisor" k8s_io_cloud_provider_aws="93e90ce7ba31ff4996cad58315638a91" kubernetes_io_arch="amd64" kubernetes_io_hostname="ip-172-31-25-201.ec2.internal" kubernetes_io_os="linux" node_kubernetes_io_instance_type="c3.2xlarge" topology_ebs_csi_aws_com_zone="us-east-1b" topology_k8s_aws_zone_id="use1-az4" topology_kubernetes_io_region="us-east-1" topology_kubernetes_io_zone="us-east-1b" 5.988s ago 71.61ms
https://kubernetes.default.svc:443/api/v1/nodes/ip-172-31-31-0.ec2.internal/proxy/metrics/cadvisor
up beta_kubernetes_io_arch="amd64" beta_kubernetes_io_instance_type="c3.2xlarge" beta_kubernetes_io_os="linux" eks_amazonaws_com_capacityType="ON_DEMAND" eks_amazonaws_com_nodegroup="prod" eks_amazonaws_com_nodegroup_image="ami-060573ecd3943c6ff" env="production" failure_domain_beta_kubernetes_io_region="us-east-1" failure_domain_beta_kubernetes_io_zone="us-east-1b" instance="ip-172-31-31-0.ec2.internal" job="kubernetes-nodes-cadvisor" k8s_io_cloud_provider_aws="93e90ce7ba31ff4996cad58315638a91" kubernetes_io_arch="amd64" kubernetes_io_hostname="ip-172-31-31-0.ec2.internal" kubernetes_io_os="linux" node_kubernetes_io_instance_type="c3.2xlarge" topology_ebs_csi_aws_com_zone="us-east-1b" topology_k8s_aws_zone_id="use1-az4" topology_kubernetes_io_region="us-east-1" topology_kubernetes_io_zone="us-east-1b" 3.103s ago 82.73ms
https://kubernetes.default.svc:443/api/v1/nodes/ip-172-31-32-105.ec2.internal/proxy/metrics/cadvisor
up beta_kubernetes_io_arch="amd64" beta_kubernetes_io_instance_type="c3.2xlarge" beta_kubernetes_io_os="linux" eks_amazonaws_com_capacityType="ON_DEMAND" eks_amazonaws_com_nodegroup="prod" eks_amazonaws_com_nodegroup_image="ami-060573ecd3943c6ff" env="production" failure_domain_beta_kubernetes_io_region="us-east-1" failure_domain_beta_kubernetes_io_zone="us-east-1c" instance="ip-172-31-32-105.ec2.internal" job="kubernetes-nodes-cadvisor" k8s_io_cloud_provider_aws="93e90ce7ba31ff4996cad58315638a91" kubernetes_io_arch="amd64" kubernetes_io_hostname="ip-172-31-32-105.ec2.internal" kubernetes_io_os="linux" node_kubernetes_io_instance_type="c3.2xlarge" topology_ebs_csi_aws_com_zone="us-east-1c" topology_k8s_aws_zone_id="use1-az6" topology_kubernetes_io_region="us-east-1" topology_kubernetes_io_zone="us-east-1c" 84ms ago 63.37ms
https://kubernetes.default.svc:443/api/v1/nodes/ip-172-31-37-228.ec2.internal/proxy/metrics/cadvisor
up beta_kubernetes_io_arch="amd64" beta_kubernetes_io_instance_type="t3.medium" beta_kubernetes_io_os="linux" eks_amazonaws_com_capacityType="ON_DEMAND" eks_amazonaws_com_nodegroup="ingress" eks_amazonaws_com_nodegroup_image="ami-0af462231da3bf6f5" failure_domain_beta_kubernetes_io_region="us-east-1" failure_domain_beta_kubernetes_io_zone="us-east-1c" instance="ip-172-31-37-228.ec2.internal" job="kubernetes-nodes-cadvisor" k8s_io_cloud_provider_aws="93e90ce7ba31ff4996cad58315638a91" kubernetes_io_arch="amd64" kubernetes_io_hostname="ip-172-31-37-228.ec2.internal" kubernetes_io_os="linux" node_kubernetes_io_instance_type="t3.medium" tenant="ingress" topology_ebs_csi_aws_com_zone="us-east-1c" topology_k8s_aws_zone_id="use1-az6" topology_kubernetes_io_region="us-east-1" topology_kubernetes_io_zone="us-east-1c" 4.571s ago 38.42ms
https://kubernetes.default.svc:443/api/v1/nodes/ip-172-31-39-227.ec2.internal/proxy/metrics/cadvisor
up beta_kubernetes_io_arch="amd64" beta_kubernetes_io_instance_type="t3.medium" beta_kubernetes_io_os="linux" eks_amazonaws_com_capacityType="ON_DEMAND" eks_amazonaws_com_nodegroup="dev" eks_amazonaws_com_nodegroup_image="ami-0af462231da3bf6f5" env="dev" failure_domain_beta_kubernetes_io_region="us-east-1" failure_domain_beta_kubernetes_io_zone="us-east-1c" instance="ip-172-31-39-227.ec2.internal" job="kubernetes-nodes-cadvisor" k8s_io_cloud_provider_aws="93e90ce7ba31ff4996cad58315638a91" kubernetes_io_arch="amd64" kubernetes_io_hostname="ip-172-31-39-227.ec2.internal" kubernetes_io_os="linux" node_kubernetes_io_instance_type="t3.medium" topology_ebs_csi_aws_com_zone="us-east-1c" topology_kubernetes_io_region="us-east-1" topology_kubernetes_io_zone="us-east-1c" 6.574s ago 86.63ms
https://kubernetes.default.svc:443/api/v1/nodes/ip-172-31-50-123.ec2.internal/proxy/metrics/cadvisor
up beta_kubernetes_io_arch="amd64" beta_kubernetes_io_instance_type="c3.2xlarge" beta_kubernetes_io_os="linux" eks_amazonaws_com_capacityType="ON_DEMAND" eks_amazonaws_com_nodegroup="prod" eks_amazonaws_com_nodegroup_image="ami-060573ecd3943c6ff" env="production" failure_domain_beta_kubernetes_io_region="us-east-1" failure_domain_beta_kubernetes_io_zone="us-east-1e" instance="ip-172-31-50-123.ec2.internal" job="kubernetes-nodes-cadvisor" k8s_io_cloud_provider_aws="93e90ce7ba31ff4996cad58315638a91" kubernetes_io_arch="amd64" kubernetes_io_hostname="ip-172-31-50-123.ec2.internal" kubernetes_io_os="linux" node_kubernetes_io_instance_type="c3.2xlarge" topology_ebs_csi_aws_com_zone="us-east-1e" topology_k8s_aws_zone_id="use1-az3" topology_kubernetes_io_region="us-east-1" topology_kubernetes_io_zone="us-east-1e" 2.053s ago 84.03ms
https://kubernetes.default.svc:443/api/v1/nodes/ip-172-31-60-251.ec2.internal/proxy/metrics/cadvisor
up beta_kubernetes_io_arch="amd64" beta_kubernetes_io_instance_type="c3.2xlarge" beta_kubernetes_io_os="linux" eks_amazonaws_com_capacityType="ON_DEMAND" eks_amazonaws_com_nodegroup="prod" eks_amazonaws_com_nodegroup_image="ami-060573ecd3943c6ff" env="production" failure_domain_beta_kubernetes_io_region="us-east-1" failure_domain_beta_kubernetes_io_zone="us-east-1e" instance="ip-172-31-60-251.ec2.internal" job="kubernetes-nodes-cadvisor" k8s_io_cloud_provider_aws="93e90ce7ba31ff4996cad58315638a91" kubernetes_io_arch="amd64" kubernetes_io_hostname="ip-172-31-60-251.ec2.internal" kubernetes_io_os="linux" node_kubernetes_io_instance_type="c3.2xlarge" topology_ebs_csi_aws_com_zone="us-east-1e" topology_k8s_aws_zone_id="use1-az3" topology_kubernetes_io_region="us-east-1" topology_kubernetes_io_zone="us-east-1e" 2.615s ago 62.46ms
https://kubernetes.default.svc:443/api/v1/nodes/ip-172-31-66-2.ec2.internal/proxy/metrics/cadvisor
up beta_kubernetes_io_arch="amd64" beta_kubernetes_io_instance_type="t3.medium" beta_kubernetes_io_os="linux" eks_amazonaws_com_capacityType="ON_DEMAND" eks_amazonaws_com_nodegroup="ingress" eks_amazonaws_com_nodegroup_image="ami-0af462231da3bf6f5" failure_domain_beta_kubernetes_io_region="us-east-1" failure_domain_beta_kubernetes_io_zone="us-east-1f" instance="ip-172-31-66-2.ec2.internal" job="kubernetes-nodes-cadvisor" k8s_io_cloud_provider_aws="93e90ce7ba31ff4996cad58315638a91" kubernetes_io_arch="amd64" kubernetes_io_hostname="ip-172-31-66-2.ec2.internal" kubernetes_io_os="linux" node_kubernetes_io_instance_type="t3.medium" tenant="ingress" topology_ebs_csi_aws_com_zone="us-east-1f" topology_k8s_aws_zone_id="use1-az5" topology_kubernetes_io_region="us-east-1" topology_kubernetes_io_zone="us-east-1f" 6.314s ago 35.19ms
https://kubernetes.default.svc:443/api/v1/nodes/ip-172-31-84-233.ec2.internal/proxy/metrics/cadvisor
up beta_kubernetes_io_arch="amd64" beta_kubernetes_io_instance_type="c3.2xlarge" beta_kubernetes_io_os="linux" eks_amazonaws_com_capacityType="ON_DEMAND" eks_amazonaws_com_nodegroup="prod" eks_amazonaws_com_nodegroup_image="ami-060573ecd3943c6ff" env="production" failure_domain_beta_kubernetes_io_region="us-east-1" failure_domain_beta_kubernetes_io_zone="us-east-1a" instance="ip-172-31-84-233.ec2.internal" job="kubernetes-nodes-cadvisor" k8s_io_cloud_provider_aws="93e90ce7ba31ff4996cad58315638a91" kubernetes_io_arch="amd64" kubernetes_io_hostname="ip-172-31-84-233.ec2.internal" kubernetes_io_os="linux" node_kubernetes_io_instance_type="c3.2xlarge" topology_ebs_csi_aws_com_zone="us-east-1a" topology_k8s_aws_zone_id="use1-az2" topology_kubernetes_io_region="us-east-1" topology_kubernetes_io_zone="us-east-1a" 2.084s ago 52.62ms
https://kubernetes.default.svc:443/api/v1/nodes/ip-172-31-90-251.ec2.internal/proxy/metrics/cadvisor
up beta_kubernetes_io_arch="amd64" beta_kubernetes_io_instance_type="t3.medium" beta_kubernetes_io_os="linux" eks_amazonaws_com_capacityType="ON_DEMAND" eks_amazonaws_com_nodegroup="ingress" eks_amazonaws_com_nodegroup_image="ami-0af462231da3bf6f5" failure_domain_beta_kubernetes_io_region="us-east-1" failure_domain_beta_kubernetes_io_zone="us-east-1a" instance="ip-172-31-90-251.ec2.internal" job="kubernetes-nodes-cadvisor" k8s_io_cloud_provider_aws="93e90ce7ba31ff4996cad58315638a91" kubernetes_io_arch="amd64" kubernetes_io_hostname="ip-172-31-90-251.ec2.internal" kubernetes_io_os="linux" node_kubernetes_io_instance_type="t3.medium" tenant="ingress" topology_ebs_csi_aws_com_zone="us-east-1a" topology_k8s_aws_zone_id="use1-az2" topology_kubernetes_io_region="us-east-1" topology_kubernetes_io_zone="us-east-1a" 2.326s ago 64.95ms
https://kubernetes.default.svc:443/api/v1/nodes/ip-172-31-94-119.ec2.internal/proxy/metrics/cadvisor
up beta_kubernetes_io_arch="amd64" beta_kubernetes_io_instance_type="t3.medium" beta_kubernetes_io_os="linux" eks_amazonaws_com_capacityType="ON_DEMAND" eks_amazonaws_com_nodegroup="dev" eks_amazonaws_com_nodegroup_image="ami-0af462231da3bf6f5" env="dev" failure_domain_beta_kubernetes_io_region="us-east-1" failure_domain_beta_kubernetes_io_zone="us-east-1a" instance="ip-172-31-94-119.ec2.internal" job="kubernetes-nodes-cadvisor" k8s_io_cloud_provider_aws="93e90ce7ba31ff4996cad58315638a91" kubernetes_io_arch="amd64" kubernetes_io_hostname="ip-172-31-94-119.ec2.internal" kubernetes_io_os="linux" node_kubernetes_io_instance_type="t3.medium" topology_ebs_csi_aws_com_zone="us-east-1a" topology_kubernetes_io_region="us-east-1" topology_kubernetes_io_zone="us-east-1a" 7.186s ago 88.73ms
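
All twelve cadvisor targets are reached through the API-server proxy path /api/v1/nodes/<node>/proxy/metrics/cadvisor, and the node labels (kubernetes_io_hostname, topology_kubernetes_io_zone, eks_amazonaws_com_nodegroup, ...) are copied onto each target. A minimal sketch of the node-discovery job that typically produces URLs and labels of this shape, modeled on the stock Prometheus Kubernetes example config rather than the actual file used here:

    scrape_configs:
      - job_name: kubernetes-nodes-cadvisor
        scheme: https
        tls_config:
          ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
        bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
        kubernetes_sd_configs:
          - role: node                                    # one target per node
        relabel_configs:
          - action: labelmap                              # copies Kubernetes node labels onto the target,
            regex: __meta_kubernetes_node_label_(.+)      # which is where the EKS/topology labels above come from
          - target_label: __address__                     # scrape through the API server...
            replacement: kubernetes.default.svc:443
          - source_labels: [__meta_kubernetes_node_name]
            regex: (.+)
            target_label: __metrics_path__                # ...using the per-node cAdvisor proxy path
            replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor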

kubernetes-pods (0/0 up)

Endpoint State Labels Last Scrape Scrape Duration Error

kubernetes-services (28/126 up)

Endpoint State Labels Last Scrape Scrape Duration Error
http://accounts-service-wepink.production-wepink.svc:80/metrics
down instance="accounts-service-wepink.production-wepink.svc:80" job="kubernetes-services" 8.119s ago 6.6ms server returned HTTP status 404 Not Found
http://accounts-service.production.svc:80/metrics
down instance="accounts-service.production.svc:80" job="kubernetes-services" k8slens_edit_resource_version="v1" 8.115s ago 7.265ms server returned HTTP status 404 Not Found
http://accounts-service.release.svc:80/metrics
down app="accounts-service" instance="accounts-service.release.svc:80" job="kubernetes-services" k8slens_edit_resource_version="v1" 1.592s ago 9.109ms server returned HTTP status 404 Not Found
http://accounts-service.staging.svc:80/metrics
down instance="accounts-service.staging.svc:80" job="kubernetes-services" k8slens_edit_resource_version="v1" 686ms ago 7.174ms server returned HTTP status 404 Not Found
http://argocd-applicationset-controller.cd.svc:7000/metrics
down app_kubernetes_io_component="applicationset-controller" app_kubernetes_io_name="argocd-applicationset-controller" app_kubernetes_io_part_of="argocd" instance="argocd-applicationset-controller.cd.svc:7000" job="kubernetes-services" 3.669s ago 1.214ms server returned HTTP status 404 Not Found
http://argocd-applicationset-controller.cd.svc:8080/metrics
up app_kubernetes_io_component="applicationset-controller" app_kubernetes_io_name="argocd-applicationset-controller" app_kubernetes_io_part_of="argocd" instance="argocd-applicationset-controller.cd.svc:8080" job="kubernetes-services" 6.587s ago 2.387ms
http://argocd-dex-server.cd.svc:5556/metrics
down app_kubernetes_io_component="dex-server" app_kubernetes_io_name="argocd-dex-server" app_kubernetes_io_part_of="argocd" instance="argocd-dex-server.cd.svc:5556" job="kubernetes-services" 9.942s ago 4.373ms Get "http://argocd-dex-server.cd.svc:5556/metrics": dial tcp 10.100.11.83:5556: connect: connection refused
http://argocd-dex-server.cd.svc:5557/metrics
down app_kubernetes_io_component="dex-server" app_kubernetes_io_name="argocd-dex-server" app_kubernetes_io_part_of="argocd" instance="argocd-dex-server.cd.svc:5557" job="kubernetes-services" 6.632s ago 3.168ms Get "http://argocd-dex-server.cd.svc:5557/metrics": dial tcp 10.100.11.83:5557: connect: connection refused
http://argocd-dex-server.cd.svc:5558/metrics
down app_kubernetes_io_component="dex-server" app_kubernetes_io_name="argocd-dex-server" app_kubernetes_io_part_of="argocd" instance="argocd-dex-server.cd.svc:5558" job="kubernetes-services" 4.264s ago 4.316ms Get "http://argocd-dex-server.cd.svc:5558/metrics": dial tcp 10.100.11.83:5558: connect: connection refused
http://argocd-metrics.cd.svc:8082/metrics
up app_kubernetes_io_component="metrics" app_kubernetes_io_name="argocd-metrics" app_kubernetes_io_part_of="argocd" instance="argocd-metrics.cd.svc:8082" job="kubernetes-services" 7.891s ago 2.6ms
http://argocd-notifications-controller-metrics.cd.svc:9001/metrics
up app_kubernetes_io_component="notifications-controller" app_kubernetes_io_name="argocd-notifications-controller-metrics" app_kubernetes_io_part_of="argocd" instance="argocd-notifications-controller-metrics.cd.svc:9001" job="kubernetes-services" 5.347s ago 2.242ms
http://argocd-redis.cd.svc:6379/metrics
down app_kubernetes_io_component="redis" app_kubernetes_io_name="argocd-redis" app_kubernetes_io_part_of="argocd" instance="argocd-redis.cd.svc:6379" job="kubernetes-services" 3.169s ago 3.868ms Get "http://argocd-redis.cd.svc:6379/metrics": EOF
http://argocd-repo-server.cd.svc:8081/metrics
down app_kubernetes_io_component="repo-server" app_kubernetes_io_name="argocd-repo-server" app_kubernetes_io_part_of="argocd" instance="argocd-repo-server.cd.svc:8081" job="kubernetes-services" 3.969s ago 4.992ms Get "http://argocd-repo-server.cd.svc:8081/metrics": EOF
http://argocd-repo-server.cd.svc:8084/metrics
up app_kubernetes_io_component="repo-server" app_kubernetes_io_name="argocd-repo-server" app_kubernetes_io_part_of="argocd" instance="argocd-repo-server.cd.svc:8084" job="kubernetes-services" 7.907s ago 2.031ms
http://argocd-server-metrics.cd.svc:8083/metrics
up app_kubernetes_io_component="server" app_kubernetes_io_name="argocd-server-metrics" app_kubernetes_io_part_of="argocd" instance="argocd-server-metrics.cd.svc:8083" job="kubernetes-services" 2.673s ago 12.76ms
http://argocd-server.cd.svc:443/metrics
down app_kubernetes_io_component="server" app_kubernetes_io_name="argocd-server" app_kubernetes_io_part_of="argocd" instance="argocd-server.cd.svc:443" job="kubernetes-services" 5.192s ago 8.086ms Get "https://argocd-server.cd.svc:443/metrics": x509: certificate signed by unknown authority
http://argocd-server.cd.svc:80/metrics
down app_kubernetes_io_component="server" app_kubernetes_io_name="argocd-server" app_kubernetes_io_part_of="argocd" instance="argocd-server.cd.svc:80" job="kubernetes-services" 2.203s ago 9.131ms Get "https://argocd-server.cd.svc:80/metrics": x509: certificate signed by unknown authority
http://cert-manager-webhook.cert-manager.svc:443/metrics
down app="webhook" app_kubernetes_io_component="webhook" app_kubernetes_io_instance="cert-manager" app_kubernetes_io_name="webhook" app_kubernetes_io_version="v1.13.1" instance="cert-manager-webhook.cert-manager.svc:443" job="kubernetes-services" 5.983s ago 2.888ms server returned HTTP status 400 Bad Request
http://cert-manager.cert-manager.svc:9402/metrics
up app="cert-manager" app_kubernetes_io_component="controller" app_kubernetes_io_instance="cert-manager" app_kubernetes_io_name="cert-manager" app_kubernetes_io_version="v1.13.1" instance="cert-manager.cert-manager.svc:9402" job="kubernetes-services" 1.924s ago 5.089ms
http://chi-signoz-clickhouse-cluster-0-0.production.svc:8123/metrics
down app_kubernetes_io_component="clickhouse" app_kubernetes_io_instance="signoz" app_kubernetes_io_managed_by="Helm" app_kubernetes_io_name="clickhouse" app_kubernetes_io_version="24.1.2" clickhouse_altinity_com_Service="host" clickhouse_altinity_com_app="chop" clickhouse_altinity_com_chi="signoz-clickhouse" clickhouse_altinity_com_cluster="cluster" clickhouse_altinity_com_namespace="production" clickhouse_altinity_com_object_version="f484f2901c327f77eb31d719df17a51cc03e3957" clickhouse_altinity_com_replica="0" clickhouse_altinity_com_shard="0" helm_sh_chart="clickhouse-24.1.7" instance="chi-signoz-clickhouse-cluster-0-0.production.svc:8123" job="kubernetes-services" 8.46s ago 4.298ms server returned HTTP status 404 Not Found
http://chi-signoz-clickhouse-cluster-0-0.production.svc:9000/metrics
down app_kubernetes_io_component="clickhouse" app_kubernetes_io_instance="signoz" app_kubernetes_io_managed_by="Helm" app_kubernetes_io_name="clickhouse" app_kubernetes_io_version="24.1.2" clickhouse_altinity_com_Service="host" clickhouse_altinity_com_app="chop" clickhouse_altinity_com_chi="signoz-clickhouse" clickhouse_altinity_com_cluster="cluster" clickhouse_altinity_com_namespace="production" clickhouse_altinity_com_object_version="f484f2901c327f77eb31d719df17a51cc03e3957" clickhouse_altinity_com_replica="0" clickhouse_altinity_com_shard="0" helm_sh_chart="clickhouse-24.1.7" instance="chi-signoz-clickhouse-cluster-0-0.production.svc:9000" job="kubernetes-services" 6.141s ago 4ms server returned HTTP status 400 Bad Request
http://chi-signoz-clickhouse-cluster-0-0.production.svc:9009/metrics
down app_kubernetes_io_component="clickhouse" app_kubernetes_io_instance="signoz" app_kubernetes_io_managed_by="Helm" app_kubernetes_io_name="clickhouse" app_kubernetes_io_version="24.1.2" clickhouse_altinity_com_Service="host" clickhouse_altinity_com_app="chop" clickhouse_altinity_com_chi="signoz-clickhouse" clickhouse_altinity_com_cluster="cluster" clickhouse_altinity_com_namespace="production" clickhouse_altinity_com_object_version="f484f2901c327f77eb31d719df17a51cc03e3957" clickhouse_altinity_com_replica="0" clickhouse_altinity_com_shard="0" helm_sh_chart="clickhouse-24.1.7" instance="chi-signoz-clickhouse-cluster-0-0.production.svc:9009" job="kubernetes-services" 2.355s ago 4.586ms server returned HTTP status 404 Not Found
http://dashboards-service.production.svc:80/metrics
down app="dashboards-service" instance="dashboards-service.production.svc:80" job="kubernetes-services" k8slens_edit_resource_version="v1" 6.176s ago 3.499ms Get "http://dashboards-service.production.svc:80/metrics": dial tcp 10.100.182.188:80: connect: connection refused
http://dashboards-service.staging.svc:80/metrics
down app="dashboards-service" instance="dashboards-service.staging.svc:80" job="kubernetes-services" k8slens_edit_resource_version="v1" 5.566s ago 3.062ms Get "http://dashboards-service.staging.svc:80/metrics": dial tcp 10.100.42.93:80: connect: connection refused
http://database-service-lb.staging.svc:3306/metrics
down instance="database-service-lb.staging.svc:3306" job="kubernetes-services" k8slens_edit_resource_version="v1" 1.557s ago 3.479ms Get "http://database-service-lb.staging.svc:3306/metrics": dial tcp 10.100.75.254:3306: connect: connection refused
http://database-service.staging.svc:3306/metrics
down instance="database-service.staging.svc:3306" job="kubernetes-services" k8slens_edit_resource_version="v1" 8.784s ago 2.746ms Get "http://database-service.staging.svc:3306/metrics": dial tcp 10.100.164.114:3306: connect: connection refused
http://facebook-service-wepink.production-wepink.svc:80/metrics
down instance="facebook-service-wepink.production-wepink.svc:80" job="kubernetes-services" 8.242s ago 4.125ms server returned HTTP status 404 Not Found
http://facebook-service.production.svc:80/metrics
down instance="facebook-service.production.svc:80" job="kubernetes-services" 8.905s ago 4.007ms server returned HTTP status 404 Not Found
http://facebook-service.staging.svc:80/metrics
down instance="facebook-service.staging.svc:80" job="kubernetes-services" 1.275s ago 5.561ms server returned HTTP status 404 Not Found
http://google-service-wepink.production-wepink.svc:80/metrics
down instance="google-service-wepink.production-wepink.svc:80" job="kubernetes-services" 3.824s ago 3.01ms Get "http://google-service-wepink.production-wepink.svc:80/metrics": dial tcp 10.100.171.160:80: connect: connection refused
http://google-service.production.svc:80/metrics
down instance="google-service.production.svc:80" job="kubernetes-services" 8.84s ago 4.208ms server returned HTTP status 404 Not Found
http://google-service.staging.svc:80/metrics
down instance="google-service.staging.svc:80" job="kubernetes-services" 2.175s ago 6.659ms server returned HTTP status 404 Not Found
http://grafana-service.production.svc:80/metrics
up instance="grafana-service.production.svc:80" job="kubernetes-services" k8slens_edit_resource_version="v1" 558ms ago 24.55ms
http://grafana-tempo-compactor.production.svc:3100/metrics
up app_kubernetes_io_component="compactor" app_kubernetes_io_instance="grafana-tempo" app_kubernetes_io_managed_by="Helm" app_kubernetes_io_name="tempo" app_kubernetes_io_version="2.6.0" helm_sh_chart="tempo-distributed-1.18.2" instance="grafana-tempo-compactor.production.svc:3100" job="kubernetes-services" 7.937s ago 4.878ms
http://grafana-tempo-distributor-discovery.production.svc:3100/metrics
up app_kubernetes_io_component="distributor" app_kubernetes_io_instance="grafana-tempo" app_kubernetes_io_managed_by="Helm" app_kubernetes_io_name="tempo" app_kubernetes_io_version="2.6.0" helm_sh_chart="tempo-distributed-1.18.2" instance="grafana-tempo-distributor-discovery.production.svc:3100" job="kubernetes-services" prometheus_io_service_monitor="false" 3.184s ago 4.085ms
http://grafana-tempo-distributor-discovery.production.svc:4317/metrics
down app_kubernetes_io_component="distributor" app_kubernetes_io_instance="grafana-tempo" app_kubernetes_io_managed_by="Helm" app_kubernetes_io_name="tempo" app_kubernetes_io_version="2.6.0" helm_sh_chart="tempo-distributed-1.18.2" instance="grafana-tempo-distributor-discovery.production.svc:4317" job="kubernetes-services" prometheus_io_service_monitor="false" 123ms ago 37.37ms Get "http://grafana-tempo-distributor-discovery.production.svc:4317/metrics": net/http: HTTP/1.x transport connection broken: malformed HTTP response "\x00\x00\x06\x04\x00\x00\x00\x00\x00\x00\x05\x00\x00@\x00"
http://grafana-tempo-distributor-discovery.production.svc:4318/metrics
down app_kubernetes_io_component="distributor" app_kubernetes_io_instance="grafana-tempo" app_kubernetes_io_managed_by="Helm" app_kubernetes_io_name="tempo" app_kubernetes_io_version="2.6.0" helm_sh_chart="tempo-distributed-1.18.2" instance="grafana-tempo-distributor-discovery.production.svc:4318" job="kubernetes-services" prometheus_io_service_monitor="false" 4.208s ago 765.5us server returned HTTP status 404 Not Found
http://grafana-tempo-distributor-discovery.production.svc:55680/metrics
down app_kubernetes_io_component="distributor" app_kubernetes_io_instance="grafana-tempo" app_kubernetes_io_managed_by="Helm" app_kubernetes_io_name="tempo" app_kubernetes_io_version="2.6.0" helm_sh_chart="tempo-distributed-1.18.2" instance="grafana-tempo-distributor-discovery.production.svc:55680" job="kubernetes-services" prometheus_io_service_monitor="false" 8.284s ago 3.096ms Get "http://grafana-tempo-distributor-discovery.production.svc:55680/metrics": dial tcp 172.31.25.88:55680: connect: connection refused
http://grafana-tempo-distributor.production.svc:3100/metrics
up app_kubernetes_io_component="distributor" app_kubernetes_io_instance="grafana-tempo" app_kubernetes_io_managed_by="Helm" app_kubernetes_io_name="tempo" app_kubernetes_io_version="2.6.0" helm_sh_chart="tempo-distributed-1.18.2" instance="grafana-tempo-distributor.production.svc:3100" job="kubernetes-services" 7.981s ago 4.988ms
http://grafana-tempo-distributor.production.svc:4317/metrics
down app_kubernetes_io_component="distributor" app_kubernetes_io_instance="grafana-tempo" app_kubernetes_io_managed_by="Helm" app_kubernetes_io_name="tempo" app_kubernetes_io_version="2.6.0" helm_sh_chart="tempo-distributed-1.18.2" instance="grafana-tempo-distributor.production.svc:4317" job="kubernetes-services" 4.777s ago 4.425ms Get "http://grafana-tempo-distributor.production.svc:4317/metrics": net/http: HTTP/1.x transport connection broken: malformed HTTP response "\x00\x00\x06\x04\x00\x00\x00\x00\x00\x00\x05\x00\x00@\x00"
http://grafana-tempo-distributor.production.svc:4318/metrics
down app_kubernetes_io_component="distributor" app_kubernetes_io_instance="grafana-tempo" app_kubernetes_io_managed_by="Helm" app_kubernetes_io_name="tempo" app_kubernetes_io_version="2.6.0" helm_sh_chart="tempo-distributed-1.18.2" instance="grafana-tempo-distributor.production.svc:4318" job="kubernetes-services" 8.728s ago 642.4us server returned HTTP status 404 Not Found
http://grafana-tempo-distributor.production.svc:55680/metrics
down app_kubernetes_io_component="distributor" app_kubernetes_io_instance="grafana-tempo" app_kubernetes_io_managed_by="Helm" app_kubernetes_io_name="tempo" app_kubernetes_io_version="2.6.0" helm_sh_chart="tempo-distributed-1.18.2" instance="grafana-tempo-distributor.production.svc:55680" job="kubernetes-services" 6.093s ago 4.78ms Get "http://grafana-tempo-distributor.production.svc:55680/metrics": net/http: HTTP/1.x transport connection broken: malformed HTTP response "\x00\x00\x06\x04\x00\x00\x00\x00\x00\x00\x05\x00\x00@\x00"
http://grafana-tempo-distributor.production.svc:9095/metrics
down app_kubernetes_io_component="distributor" app_kubernetes_io_instance="grafana-tempo" app_kubernetes_io_managed_by="Helm" app_kubernetes_io_name="tempo" app_kubernetes_io_version="2.6.0" helm_sh_chart="tempo-distributed-1.18.2" instance="grafana-tempo-distributor.production.svc:9095" job="kubernetes-services" 476ms ago 4.747ms Get "http://grafana-tempo-distributor.production.svc:9095/metrics": net/http: HTTP/1.x transport connection broken: malformed HTTP response "\x00\x00\f\x04\x00\x00\x00\x00\x00\x00\x05\x00\x00@\x00\x00\x03\x00\x00\x00d"
http://grafana-tempo-gossip-ring.production.svc:7946/metrics
down app_kubernetes_io_component="gossip-ring" app_kubernetes_io_instance="grafana-tempo" app_kubernetes_io_managed_by="Helm" app_kubernetes_io_name="tempo" app_kubernetes_io_version="2.6.0" helm_sh_chart="tempo-distributed-1.18.2" instance="grafana-tempo-gossip-ring.production.svc:7946" job="kubernetes-services" 1.876s ago 4.388ms Get "http://grafana-tempo-gossip-ring.production.svc:7946/metrics": read tcp 172.31.29.212:36580->172.31.53.161:7946: read: connection reset by peer
http://grafana-tempo-ingester-discovery.production.svc:3100/metrics
up app_kubernetes_io_component="ingester" app_kubernetes_io_instance="grafana-tempo" app_kubernetes_io_managed_by="Helm" app_kubernetes_io_name="tempo" app_kubernetes_io_version="2.6.0" helm_sh_chart="tempo-distributed-1.18.2" instance="grafana-tempo-ingester-discovery.production.svc:3100" job="kubernetes-services" prometheus_io_service_monitor="false" 3.884s ago 4.504ms
http://grafana-tempo-ingester-discovery.production.svc:9095/metrics
down app_kubernetes_io_component="ingester" app_kubernetes_io_instance="grafana-tempo" app_kubernetes_io_managed_by="Helm" app_kubernetes_io_name="tempo" app_kubernetes_io_version="2.6.0" helm_sh_chart="tempo-distributed-1.18.2" instance="grafana-tempo-ingester-discovery.production.svc:9095" job="kubernetes-services" prometheus_io_service_monitor="false" 1.887s ago 5.63ms Get "http://grafana-tempo-ingester-discovery.production.svc:9095/metrics": net/http: HTTP/1.x transport connection broken: malformed HTTP response "\x00\x00\f\x04\x00\x00\x00\x00\x00\x00\x05\x00\x00@\x00\x00\x03\x00\x00\x00d"
http://grafana-tempo-ingester.production.svc:3100/metrics
up app_kubernetes_io_component="ingester" app_kubernetes_io_instance="grafana-tempo" app_kubernetes_io_managed_by="Helm" app_kubernetes_io_name="tempo" app_kubernetes_io_version="2.6.0" helm_sh_chart="tempo-distributed-1.18.2" instance="grafana-tempo-ingester.production.svc:3100" job="kubernetes-services" 5.733s ago 4.288ms
http://grafana-tempo-ingester.production.svc:9095/metrics
down app_kubernetes_io_component="ingester" app_kubernetes_io_instance="grafana-tempo" app_kubernetes_io_managed_by="Helm" app_kubernetes_io_name="tempo" app_kubernetes_io_version="2.6.0" helm_sh_chart="tempo-distributed-1.18.2" instance="grafana-tempo-ingester.production.svc:9095" job="kubernetes-services" 9.114s ago 5.903ms Get "http://grafana-tempo-ingester.production.svc:9095/metrics": net/http: HTTP/1.x transport connection broken: malformed HTTP response "\x00\x00\f\x04\x00\x00\x00\x00\x00\x00\x05\x00\x00@\x00\x00\x03\x00\x00\x00d"
http://grafana-tempo-memcached.production.svc:11211/metrics
down app_kubernetes_io_component="memcached" app_kubernetes_io_instance="grafana-tempo" app_kubernetes_io_managed_by="Helm" app_kubernetes_io_name="tempo" app_kubernetes_io_version="2.6.0" helm_sh_chart="tempo-distributed-1.18.2" instance="grafana-tempo-memcached.production.svc:11211" job="kubernetes-services" 3.67s ago 4.227ms Get "http://grafana-tempo-memcached.production.svc:11211/metrics": EOF
http://grafana-tempo-memcached.production.svc:9150/metrics
down app_kubernetes_io_component="memcached" app_kubernetes_io_instance="grafana-tempo" app_kubernetes_io_managed_by="Helm" app_kubernetes_io_name="tempo" app_kubernetes_io_version="2.6.0" helm_sh_chart="tempo-distributed-1.18.2" instance="grafana-tempo-memcached.production.svc:9150" job="kubernetes-services" 4.513s ago 3.195ms Get "http://grafana-tempo-memcached.production.svc:9150/metrics": dial tcp 10.100.70.76:9150: connect: connection refused
http://grafana-tempo-querier.production.svc:3100/metrics
up app_kubernetes_io_component="querier" app_kubernetes_io_instance="grafana-tempo" app_kubernetes_io_managed_by="Helm" app_kubernetes_io_name="tempo" app_kubernetes_io_version="2.6.0" helm_sh_chart="tempo-distributed-1.18.2" instance="grafana-tempo-querier.production.svc:3100" job="kubernetes-services" 4.757s ago 4.152ms
http://grafana-tempo-querier.production.svc:9095/metrics
down app_kubernetes_io_component="querier" app_kubernetes_io_instance="grafana-tempo" app_kubernetes_io_managed_by="Helm" app_kubernetes_io_name="tempo" app_kubernetes_io_version="2.6.0" helm_sh_chart="tempo-distributed-1.18.2" instance="grafana-tempo-querier.production.svc:9095" job="kubernetes-services" 8.051s ago 4.439ms Get "http://grafana-tempo-querier.production.svc:9095/metrics": net/http: HTTP/1.x transport connection broken: malformed HTTP response "\x00\x00\f\x04\x00\x00\x00\x00\x00\x00\x05\x00\x00@\x00\x00\x03\x00\x00\x00d"
http://grafana-tempo-query-frontend-discovery.production.svc:3100/metrics
up app_kubernetes_io_component="query-frontend" app_kubernetes_io_instance="grafana-tempo" app_kubernetes_io_managed_by="Helm" app_kubernetes_io_name="tempo" app_kubernetes_io_version="2.6.0" helm_sh_chart="tempo-distributed-1.18.2" instance="grafana-tempo-query-frontend-discovery.production.svc:3100" job="kubernetes-services" 1.118s ago 4.191ms
http://grafana-tempo-query-frontend-discovery.production.svc:9095/metrics
down app_kubernetes_io_component="query-frontend" app_kubernetes_io_instance="grafana-tempo" app_kubernetes_io_managed_by="Helm" app_kubernetes_io_name="tempo" app_kubernetes_io_version="2.6.0" helm_sh_chart="tempo-distributed-1.18.2" instance="grafana-tempo-query-frontend-discovery.production.svc:9095" job="kubernetes-services" 8.503s ago 4.984ms Get "http://grafana-tempo-query-frontend-discovery.production.svc:9095/metrics": net/http: HTTP/1.x transport connection broken: malformed HTTP response "\x00\x00\f\x04\x00\x00\x00\x00\x00\x00\x05\x00\x00@\x00\x00\x03\x00\x00\x00d"
http://grafana-tempo-query-frontend-discovery.production.svc:9096/metrics
down app_kubernetes_io_component="query-frontend" app_kubernetes_io_instance="grafana-tempo" app_kubernetes_io_managed_by="Helm" app_kubernetes_io_name="tempo" app_kubernetes_io_version="2.6.0" helm_sh_chart="tempo-distributed-1.18.2" instance="grafana-tempo-query-frontend-discovery.production.svc:9096" job="kubernetes-services" 6.27s ago 4.294ms Get "http://grafana-tempo-query-frontend-discovery.production.svc:9096/metrics": dial tcp 172.31.58.55:9096: connect: connection refused
http://grafana-tempo-query-frontend.production.svc:3100/metrics
up app_kubernetes_io_component="query-frontend" app_kubernetes_io_instance="grafana-tempo" app_kubernetes_io_managed_by="Helm" app_kubernetes_io_name="tempo" app_kubernetes_io_version="2.6.0" helm_sh_chart="tempo-distributed-1.18.2" instance="grafana-tempo-query-frontend.production.svc:3100" job="kubernetes-services" 8.191s ago 4.194ms
http://grafana-tempo-query-frontend.production.svc:9095/metrics
down app_kubernetes_io_component="query-frontend" app_kubernetes_io_instance="grafana-tempo" app_kubernetes_io_managed_by="Helm" app_kubernetes_io_name="tempo" app_kubernetes_io_version="2.6.0" helm_sh_chart="tempo-distributed-1.18.2" instance="grafana-tempo-query-frontend.production.svc:9095" job="kubernetes-services" 5.942s ago 4.973ms Get "http://grafana-tempo-query-frontend.production.svc:9095/metrics": net/http: HTTP/1.x transport connection broken: malformed HTTP response "\x00\x00\f\x04\x00\x00\x00\x00\x00\x00\x05\x00\x00@\x00\x00\x03\x00\x00\x00d"
http://ingress-nginx-controller-admission.ingress-nginx.svc:443/metrics
down app_kubernetes_io_component="controller" app_kubernetes_io_instance="ingress-nginx" app_kubernetes_io_name="ingress-nginx" app_kubernetes_io_part_of="ingress-nginx" app_kubernetes_io_version="1.8.2" instance="ingress-nginx-controller-admission.ingress-nginx.svc:443" job="kubernetes-services" 3.738s ago 3.796ms server returned HTTP status 400 Bad Request
http://ingress-nginx-controller.ingress-nginx.svc:443/metrics
down app_kubernetes_io_component="controller" app_kubernetes_io_instance="ingress-nginx" app_kubernetes_io_name="ingress-nginx" app_kubernetes_io_part_of="ingress-nginx" app_kubernetes_io_version="1.8.2" instance="ingress-nginx-controller.ingress-nginx.svc:443" job="kubernetes-services" k8slens_edit_resource_version="v1" 8.797s ago 3.863ms server returned HTTP status 400 Bad Request
http://ingress-nginx-controller.ingress-nginx.svc:80/metrics
down app_kubernetes_io_component="controller" app_kubernetes_io_instance="ingress-nginx" app_kubernetes_io_name="ingress-nginx" app_kubernetes_io_part_of="ingress-nginx" app_kubernetes_io_version="1.8.2" instance="ingress-nginx-controller.ingress-nginx.svc:80" job="kubernetes-services" k8slens_edit_resource_version="v1" 9.692s ago 530us server returned HTTP status 404 Not Found
http://ingress-nginx-controller.ingress-nginx.svc:9913/metrics
up app_kubernetes_io_component="controller" app_kubernetes_io_instance="ingress-nginx" app_kubernetes_io_name="ingress-nginx" app_kubernetes_io_part_of="ingress-nginx" app_kubernetes_io_version="1.8.2" instance="ingress-nginx-controller.ingress-nginx.svc:9913" job="kubernetes-services" k8slens_edit_resource_version="v1" 5.771s ago 268.3ms
http://insights-service.production.svc:80/metrics
down app="insights-service" instance="insights-service.production.svc:80" job="kubernetes-services" k8slens_edit_resource_version="v1" 2.867s ago 5.96ms server returned HTTP status 404 Not Found
http://kube-dns.kube-system.svc:53/metrics
down eks_amazonaws_com_component="kube-dns" instance="kube-dns.kube-system.svc:53" job="kubernetes-services" k8s_app="kube-dns" kubernetes_io_cluster_service="true" kubernetes_io_name="CoreDNS" 9.546s ago 3.005s Get "http://kube-dns.kube-system.svc:53/metrics": EOF
http://kube-state-metrics.default.svc:8080/metrics
up app_kubernetes_io_component="metrics" app_kubernetes_io_instance="kube-state-metrics" app_kubernetes_io_managed_by="Helm" app_kubernetes_io_name="kube-state-metrics" app_kubernetes_io_part_of="kube-state-metrics" app_kubernetes_io_version="2.12.0" helm_sh_chart="kube-state-metrics-5.19.0" instance="kube-state-metrics.default.svc:8080" job="kubernetes-services" 3.673s ago 63.63ms
http://kubelet.kube-system.svc:10250/metrics
down app_kubernetes_io_managed_by="prometheus-operator" app_kubernetes_io_name="kubelet" instance="kubelet.kube-system.svc:10250" job="kubernetes-services" k8s_app="kubelet" 4.887s ago 8.44ms server returned HTTP status 400 Bad Request
http://kubelet.kube-system.svc:10255/metrics
down app_kubernetes_io_managed_by="prometheus-operator" app_kubernetes_io_name="kubelet" instance="kubelet.kube-system.svc:10255" job="kubernetes-services" k8s_app="kubelet" 6.039s ago 11.64ms Get "http://kubelet.kube-system.svc:10255/metrics": dial tcp 172.31.50.123:10255: connect: connection refused
http://kubelet.kube-system.svc:4194/metrics
down app_kubernetes_io_managed_by="prometheus-operator" app_kubernetes_io_name="kubelet" instance="kubelet.kube-system.svc:4194" job="kubernetes-services" k8s_app="kubelet" 8.41s ago 12.26ms Get "http://kubelet.kube-system.svc:4194/metrics": dial tcp 172.31.0.24:4194: connect: connection refused
http://kubernetes.default.svc:443/metrics
down component="apiserver" instance="kubernetes.default.svc:443" job="kubernetes-services" provider="kubernetes" 7.063s ago 4.512ms server returned HTTP status 400 Bad Request
http://metrics-server.kube-system.svc:443/metrics
down instance="metrics-server.kube-system.svc:443" job="kubernetes-services" k8s_app="metrics-server" 4.267s ago 4.668ms server returned HTTP status 400 Bad Request
http://nemu-api.production.svc:80/metrics
down instance="nemu-api.production.svc:80" job="kubernetes-services" 3.289s ago 3.593ms server returned HTTP status 404 Not Found
http://nemu-api.staging.svc:80/metrics
down instance="nemu-api.staging.svc:80" job="kubernetes-services" 9.832s ago 5.405ms server returned HTTP status 404 Not Found
http://node-exporter.default.svc:9100/metrics
up app="node-exporter-svc" instance="node-exporter.default.svc:9100" job="kubernetes-services" 1.399s ago 24.25ms
http://notifications-service.production.svc:80/metrics
down app="notifications-service" instance="notifications-service.production.svc:80" job="kubernetes-services" k8slens_edit_resource_version="v1" 7.079s ago 3.698ms Get "http://notifications-service.production.svc:80/metrics": dial tcp 10.100.119.51:80: connect: connection refused
http://notifications-service.staging.svc:80/metrics
down app="notifications-service" instance="notifications-service.staging.svc:80" job="kubernetes-services" k8slens_edit_resource_version="v1" 9.024s ago 3.113ms Get "http://notifications-service.staging.svc:80/metrics": dial tcp 10.100.90.187:80: connect: connection refused
http://opentelemetry-collector.production.svc:4317/metrics
down app_kubernetes_io_instance="opentelemetry-collector" app_kubernetes_io_managed_by="Helm" app_kubernetes_io_name="opentelemetry-collector" app_kubernetes_io_version="0.111.0" component="standalone-collector" helm_sh_chart="opentelemetry-collector-0.108.0" instance="opentelemetry-collector.production.svc:4317" job="kubernetes-services" 6.167s ago 3.136ms Get "http://opentelemetry-collector.production.svc:4317/metrics": net/http: HTTP/1.x transport connection broken: malformed HTTP response "\x00\x00\x06\x04\x00\x00\x00\x00\x00\x00\x05\x00\x00@\x00"
http://opentelemetry-collector.production.svc:4318/metrics
down app_kubernetes_io_instance="opentelemetry-collector" app_kubernetes_io_managed_by="Helm" app_kubernetes_io_name="opentelemetry-collector" app_kubernetes_io_version="0.111.0" component="standalone-collector" helm_sh_chart="opentelemetry-collector-0.108.0" instance="opentelemetry-collector.production.svc:4318" job="kubernetes-services" 1.037s ago 424.4us server returned HTTP status 404 Not Found
http://payments-service.production.svc:80/metrics
up app="payments-service" instance="payments-service.production.svc:80" job="kubernetes-services" k8slens_edit_resource_version="v1" 1.861s ago 7.251ms
http://payments-service.staging.svc:80/metrics
up instance="payments-service.staging.svc:80" job="kubernetes-services" k8slens_edit_resource_version="v1" 3.107s ago 15.1ms
http://prometheus-operated.default.svc:9090/metrics
up instance="prometheus-operated.default.svc:9090" job="kubernetes-services" managed_by="prometheus-operator" operated_prometheus="true" 9.293s ago 7.52ms
http://prometheus-operator.default.svc:8080/metrics
up app_kubernetes_io_component="controller" app_kubernetes_io_name="prometheus-operator" app_kubernetes_io_version="0.73.2" instance="prometheus-operator.default.svc:8080" job="kubernetes-services" 6.496s ago 2.757ms
http://prometheus.default.svc:9090/metrics
up app="prometheus" instance="prometheus.default.svc:9090" job="kubernetes-services" 9.118s ago 6.576ms
http://redis-lb.release.svc:6379/metrics
down instance="redis-lb.release.svc:6379" job="kubernetes-services" k8slens_edit_resource_version="v1" 3.448s ago 5.429ms Get "http://redis-lb.release.svc:6379/metrics": EOF
http://redis-service-lb.staging.svc:6379/metrics
down instance="redis-service-lb.staging.svc:6379" job="kubernetes-services" 6.808s ago 5.225ms Get "http://redis-service-lb.staging.svc:6379/metrics": EOF
http://redis-service.staging.svc:6379/metrics
down instance="redis-service.staging.svc:6379" job="kubernetes-services" k8slens_edit_resource_version="v1" 5.833s ago 5.384ms Get "http://redis-service.staging.svc:6379/metrics": EOF
http://redis.release.svc:6379/metrics
down instance="redis.release.svc:6379" job="kubernetes-services" 4.697s ago 4.385ms Get "http://redis.release.svc:6379/metrics": EOF
http://reports-service-wepink.production-wepink.svc:80/metrics
down instance="reports-service-wepink.production-wepink.svc:80" job="kubernetes-services" 967ms ago 3.806ms Get "http://reports-service-wepink.production-wepink.svc:80/metrics": dial tcp 10.100.19.179:80: connect: connection refused
http://signoz-alertmanager-headless.production.svc:9093/metrics
up app_kubernetes_io_component="alertmanager" app_kubernetes_io_instance="signoz" app_kubernetes_io_managed_by="Helm" app_kubernetes_io_name="signoz" app_kubernetes_io_version="0.55.0" helm_sh_chart="signoz-0.53.1" instance="signoz-alertmanager-headless.production.svc:9093" job="kubernetes-services" 4.032s ago 3.297ms
http://signoz-alertmanager.production.svc:9093/metrics
up app_kubernetes_io_component="alertmanager" app_kubernetes_io_instance="signoz" app_kubernetes_io_managed_by="Helm" app_kubernetes_io_name="signoz" app_kubernetes_io_version="0.55.0" helm_sh_chart="signoz-0.53.1" instance="signoz-alertmanager.production.svc:9093" job="kubernetes-services" 5.233s ago 3.281ms
http://signoz-clickhouse-operator-metrics.production.svc:8888/metrics
up app_kubernetes_io_component="operator" app_kubernetes_io_instance="signoz" app_kubernetes_io_managed_by="Helm" app_kubernetes_io_name="clickhouse" app_kubernetes_io_version="24.1.2" clickhouse_altinity_com_chop="0.21.2" helm_sh_chart="clickhouse-24.1.7" instance="signoz-clickhouse-operator-metrics.production.svc:8888" job="kubernetes-services" 9.31s ago 41.69ms
http://signoz-clickhouse.production.svc:8123/metrics
down app_kubernetes_io_component="clickhouse" app_kubernetes_io_instance="signoz" app_kubernetes_io_managed_by="Helm" app_kubernetes_io_name="clickhouse" app_kubernetes_io_version="24.1.2" clickhouse_altinity_com_Service="chi" clickhouse_altinity_com_app="chop" clickhouse_altinity_com_chi="signoz-clickhouse" clickhouse_altinity_com_namespace="production" clickhouse_altinity_com_object_version="cdd9427f0c604b10e178aaa8d6b74957ea718014" helm_sh_chart="clickhouse-24.1.7" instance="signoz-clickhouse.production.svc:8123" job="kubernetes-services" 440ms ago 4.154ms server returned HTTP status 404 Not Found
http://signoz-clickhouse.production.svc:9000/metrics
down app_kubernetes_io_component="clickhouse" app_kubernetes_io_instance="signoz" app_kubernetes_io_managed_by="Helm" app_kubernetes_io_name="clickhouse" app_kubernetes_io_version="24.1.2" clickhouse_altinity_com_Service="chi" clickhouse_altinity_com_app="chop" clickhouse_altinity_com_chi="signoz-clickhouse" clickhouse_altinity_com_namespace="production" clickhouse_altinity_com_object_version="cdd9427f0c604b10e178aaa8d6b74957ea718014" helm_sh_chart="clickhouse-24.1.7" instance="signoz-clickhouse.production.svc:9000" job="kubernetes-services" 2.478s ago 3.99ms server returned HTTP status 400 Bad Request
http://signoz-frontend.production.svc:3301/metrics
down app_kubernetes_io_component="frontend" app_kubernetes_io_instance="signoz" app_kubernetes_io_managed_by="Helm" app_kubernetes_io_name="signoz" app_kubernetes_io_version="0.55.0" helm_sh_chart="signoz-0.53.1" instance="signoz-frontend.production.svc:3301" job="kubernetes-services" 5.737s ago 1.245ms "INVALID" is not a valid start token
http://signoz-otel-collector-metrics.production.svc:13133/metrics
down app_kubernetes_io_component="otel-collector-metrics" app_kubernetes_io_instance="signoz" app_kubernetes_io_managed_by="Helm" app_kubernetes_io_name="signoz" app_kubernetes_io_version="0.55.0" helm_sh_chart="signoz-0.53.1" instance="signoz-otel-collector-metrics.production.svc:13133" job="kubernetes-services" 6.269s ago 1.11ms "INVALID" is not a valid start token
http://signoz-otel-collector.production.svc:14250/metrics
down app_kubernetes_io_component="otel-collector" app_kubernetes_io_instance="signoz" app_kubernetes_io_managed_by="Helm" app_kubernetes_io_name="signoz" app_kubernetes_io_version="0.55.0" helm_sh_chart="signoz-0.53.1" instance="signoz-otel-collector.production.svc:14250" job="kubernetes-services" 9.475s ago 5.369ms Get "http://signoz-otel-collector.production.svc:14250/metrics": net/http: HTTP/1.x transport connection broken: malformed HTTP response "\x00\x00\x06\x04\x00\x00\x00\x00\x00\x00\x05\x00\x00@\x00"
http://signoz-otel-collector.production.svc:14268/metrics
down app_kubernetes_io_component="otel-collector" app_kubernetes_io_instance="signoz" app_kubernetes_io_managed_by="Helm" app_kubernetes_io_name="signoz" app_kubernetes_io_version="0.55.0" helm_sh_chart="signoz-0.53.1" instance="signoz-otel-collector.production.svc:14268" job="kubernetes-services" 4.272s ago 1.596ms server returned HTTP status 404 Not Found
http://signoz-otel-collector.production.svc:4317/metrics
down app_kubernetes_io_component="otel-collector" app_kubernetes_io_instance="signoz" app_kubernetes_io_managed_by="Helm" app_kubernetes_io_name="signoz" app_kubernetes_io_version="0.55.0" helm_sh_chart="signoz-0.53.1" instance="signoz-otel-collector.production.svc:4317" job="kubernetes-services" 9.538s ago 5.466ms Get "http://signoz-otel-collector.production.svc:4317/metrics": net/http: HTTP/1.x transport connection broken: malformed HTTP response "\x00\x00\x06\x04\x00\x00\x00\x00\x00\x00\x05\x00\x00@\x00"
http://signoz-otel-collector.production.svc:4318/metrics
down app_kubernetes_io_component="otel-collector" app_kubernetes_io_instance="signoz" app_kubernetes_io_managed_by="Helm" app_kubernetes_io_name="signoz" app_kubernetes_io_version="0.55.0" helm_sh_chart="signoz-0.53.1" instance="signoz-otel-collector.production.svc:4318" job="kubernetes-services" 695ms ago 2.549ms server returned HTTP status 404 Not Found
http://signoz-otel-collector.production.svc:8081/metrics
down app_kubernetes_io_component="otel-collector" app_kubernetes_io_instance="signoz" app_kubernetes_io_managed_by="Helm" app_kubernetes_io_name="signoz" app_kubernetes_io_version="0.55.0" helm_sh_chart="signoz-0.53.1" instance="signoz-otel-collector.production.svc:8081" job="kubernetes-services" 8.536s ago 1.676ms server returned HTTP status 404 Not Found
http://signoz-otel-collector.production.svc:8082/metrics
down app_kubernetes_io_component="otel-collector" app_kubernetes_io_instance="signoz" app_kubernetes_io_managed_by="Helm" app_kubernetes_io_name="signoz" app_kubernetes_io_version="0.55.0" helm_sh_chart="signoz-0.53.1" instance="signoz-otel-collector.production.svc:8082" job="kubernetes-services" 410ms ago 13.48ms server returned HTTP status 404 Not Found
http://signoz-otel-collector.production.svc:8888/metrics
up app_kubernetes_io_component="otel-collector" app_kubernetes_io_instance="signoz" app_kubernetes_io_managed_by="Helm" app_kubernetes_io_name="signoz" app_kubernetes_io_version="0.55.0" helm_sh_chart="signoz-0.53.1" instance="signoz-otel-collector.production.svc:8888" job="kubernetes-services" 2.289s ago 4.51ms
http://signoz-query-service.production.svc:4320/metrics
down app_kubernetes_io_component="query-service" app_kubernetes_io_instance="signoz" app_kubernetes_io_managed_by="Helm" app_kubernetes_io_name="signoz" app_kubernetes_io_version="0.55.0" helm_sh_chart="signoz-0.53.1" instance="signoz-query-service.production.svc:4320" job="kubernetes-services" 4.43s ago 748us server returned HTTP status 404 Not Found
http://signoz-query-service.production.svc:8080/metrics
down app_kubernetes_io_component="query-service" app_kubernetes_io_instance="signoz" app_kubernetes_io_managed_by="Helm" app_kubernetes_io_name="signoz" app_kubernetes_io_version="0.55.0" helm_sh_chart="signoz-0.53.1" instance="signoz-query-service.production.svc:8080" job="kubernetes-services" 7.734s ago 953.1us server returned HTTP status 404 Not Found
http://signoz-query-service.production.svc:8085/metrics
down app_kubernetes_io_component="query-service" app_kubernetes_io_instance="signoz" app_kubernetes_io_managed_by="Helm" app_kubernetes_io_name="signoz" app_kubernetes_io_version="0.55.0" helm_sh_chart="signoz-0.53.1" instance="signoz-query-service.production.svc:8085" job="kubernetes-services" 3.472s ago 950.2us server returned HTTP status 404 Not Found
http://signoz-zookeeper-headless.production.svc:2181/metrics
down app_kubernetes_io_component="zookeeper" app_kubernetes_io_instance="signoz" app_kubernetes_io_managed_by="Helm" app_kubernetes_io_name="zookeeper" helm_sh_chart="zookeeper-11.4.2" instance="signoz-zookeeper-headless.production.svc:2181" job="kubernetes-services" 8.753s ago 3.981ms Get "http://signoz-zookeeper-headless.production.svc:2181/metrics": EOF
http://signoz-zookeeper-headless.production.svc:2888/metrics
down app_kubernetes_io_component="zookeeper" app_kubernetes_io_instance="signoz" app_kubernetes_io_managed_by="Helm" app_kubernetes_io_name="zookeeper" helm_sh_chart="zookeeper-11.4.2" instance="signoz-zookeeper-headless.production.svc:2888" job="kubernetes-services" 2.912s ago 3.732ms Get "http://signoz-zookeeper-headless.production.svc:2888/metrics": dial tcp 172.31.25.91:2888: connect: connection refused
http://signoz-zookeeper-headless.production.svc:3888/metrics
down app_kubernetes_io_component="zookeeper" app_kubernetes_io_instance="signoz" app_kubernetes_io_managed_by="Helm" app_kubernetes_io_name="zookeeper" helm_sh_chart="zookeeper-11.4.2" instance="signoz-zookeeper-headless.production.svc:3888" job="kubernetes-services" 7.78s ago 3.913ms Get "http://signoz-zookeeper-headless.production.svc:3888/metrics": dial tcp 172.31.25.91:3888: connect: connection refused
http://signoz-zookeeper-metrics.production.svc:9141/metrics
up app_kubernetes_io_component="metrics" app_kubernetes_io_instance="signoz" app_kubernetes_io_managed_by="Helm" app_kubernetes_io_name="zookeeper" helm_sh_chart="zookeeper-11.4.2" instance="signoz-zookeeper-metrics.production.svc:9141" job="kubernetes-services" 3.664s ago 2.553ms
http://signoz-zookeeper.production.svc:2181/metrics
down app_kubernetes_io_component="zookeeper" app_kubernetes_io_instance="signoz" app_kubernetes_io_managed_by="Helm" app_kubernetes_io_name="zookeeper" helm_sh_chart="zookeeper-11.4.2" instance="signoz-zookeeper.production.svc:2181" job="kubernetes-services" 2.136s ago 3.533ms Get "http://signoz-zookeeper.production.svc:2181/metrics": EOF
http://signoz-zookeeper.production.svc:2888/metrics
down app_kubernetes_io_component="zookeeper" app_kubernetes_io_instance="signoz" app_kubernetes_io_managed_by="Helm" app_kubernetes_io_name="zookeeper" helm_sh_chart="zookeeper-11.4.2" instance="signoz-zookeeper.production.svc:2888" job="kubernetes-services" 8.916s ago 2.825ms Get "http://signoz-zookeeper.production.svc:2888/metrics": dial tcp 10.100.139.30:2888: connect: connection refused
http://signoz-zookeeper.production.svc:3888/metrics
down app_kubernetes_io_component="zookeeper" app_kubernetes_io_instance="signoz" app_kubernetes_io_managed_by="Helm" app_kubernetes_io_name="zookeeper" helm_sh_chart="zookeeper-11.4.2" instance="signoz-zookeeper.production.svc:3888" job="kubernetes-services" 771ms ago 3.236ms Get "http://signoz-zookeeper.production.svc:3888/metrics": dial tcp 10.100.139.30:3888: connect: connection refused
http://trackings-consumer-wepink.production-wepink.svc:80/metrics
down instance="trackings-consumer-wepink.production-wepink.svc:80" job="kubernetes-services" 2.931s ago 5.512ms server returned HTTP status 404 Not Found
http://trackings-consumer.production.svc:80/metrics
down app="trackings-consumer" instance="trackings-consumer.production.svc:80" job="kubernetes-services" k8slens_edit_resource_version="v1" 1.209s ago 7.195ms server returned HTTP status 404 Not Found
http://trackings-service-wepink.production-wepink.svc:80/metrics
down instance="trackings-service-wepink.production-wepink.svc:80" job="kubernetes-services" 1.834s ago 6.025ms server returned HTTP status 404 Not Found
http://trackings-service.production.svc:4333/metrics
down instance="trackings-service.production.svc:4333" job="kubernetes-services" k8slens_edit_resource_version="v1" 5.832s ago 4.603ms Get "http://trackings-service.production.svc:4333/metrics": net/http: HTTP/1.x transport connection broken: malformed HTTP response "\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\b\a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02"
http://trackings-service.production.svc:80/metrics
down instance="trackings-service.production.svc:80" job="kubernetes-services" k8slens_edit_resource_version="v1" 904ms ago 29.81ms server returned HTTP status 404 Not Found
http://trackings-service.release.svc:4333/metrics
down app="trackings-service" instance="trackings-service.release.svc:4333" job="kubernetes-services" k8slens_edit_resource_version="v1" 425ms ago 6.476ms Get "http://trackings-service.release.svc:4333/metrics": net/http: HTTP/1.x transport connection broken: malformed HTTP response "\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\b\a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02"
http://trackings-service.release.svc:80/metrics
down app="trackings-service" instance="trackings-service.release.svc:80" job="kubernetes-services" k8slens_edit_resource_version="v1" 846ms ago 6.582ms server returned HTTP status 404 Not Found
http://trackings-service.staging.svc:4333/metrics
down instance="trackings-service.staging.svc:4333" job="kubernetes-services" k8slens_edit_resource_version="v1" 2.239s ago 3.053ms Get "http://trackings-service.staging.svc:4333/metrics": dial tcp 10.100.112.153:4333: connect: connection refused
http://trackings-service.staging.svc:80/metrics
down instance="trackings-service.staging.svc:80" job="kubernetes-services" k8slens_edit_resource_version="v1" 8.935s ago 5.406ms server returned HTTP status 404 Not Found
http://webhook-service.production.svc:80/metrics
down instance="webhook-service.production.svc:80" job="kubernetes-services" k8slens_edit_resource_version="v1" 6.669s ago 5.365ms server returned HTTP status 404 Not Found
http://webhooks-consumer-wepink.production-wepink.svc:80/metrics
down instance="webhooks-consumer-wepink.production-wepink.svc:80" job="kubernetes-services" 4.501s ago 7.277ms server returned HTTP status 404 Not Found
http://webhooks-consumer.production.svc:80/metrics
down app="webhooks-consumer" instance="webhooks-consumer.production.svc:80" job="kubernetes-services" k8slens_edit_resource_version="v1" 9.993s ago 6.007ms server returned HTTP status 404 Not Found
http://webhooks-consumer.staging.svc:80/metrics
down app="webhooks-consumer" instance="webhooks-consumer.staging.svc:80" job="kubernetes-services" k8slens_edit_resource_version="v1" 7.472s ago 6.208ms server returned HTTP status 404 Not Found
http://webhooks-service-wepink.production-wepink.svc:80/metrics
down instance="webhooks-service-wepink.production-wepink.svc:80" job="kubernetes-services" 6.429s ago 6.691ms server returned HTTP status 404 Not Found
http://webhooks-service.release.svc:80/metrics
down app="webhooks-service" instance="webhooks-service.release.svc:80" job="kubernetes-services" k8slens_edit_resource_version="v1" 2.829s ago 5.049ms server returned HTTP status 404 Not Found
http://webhooks-service.staging.svc:80/metrics
down app="webhooks-service" instance="webhooks-service.staging.svc:80" job="kubernetes-services" k8slens_edit_resource_version="v1" 8.69s ago 1.025s Get "http://webhooks-service.staging.svc:80/metrics": dial tcp 10.100.109.82:80: connect: connection refused

node-exporter (1/1 up)

Endpoint State Labels Last Scrape Scrape Duration Error
http://node-exporter:9100/metrics
up instance="node-exporter:9100" job="node-exporter" 5.739s ago 24ms

serviceMonitor/default/kube-state-metrics-service-monitor/0 (1/1 up)

Endpoint State Labels Last Scrape Scrape Duration Error
http://172.31.84.76:8080/metrics
up container="kube-state-metrics" endpoint="http" instance="172.31.84.76:8080" job="kube-state-metrics" namespace="default" pod="kube-state-metrics-6465bd56c9-lbkh2" service="kube-state-metrics" 3.438s ago 59.1ms

serviceMonitor/default/node-exporter-service-monitor/0 (12/12 up)

Endpoint State Labels Last Scrape Scrape Duration Error
http://172.31.1.90:9100/metrics
up container="node-exporter" endpoint="node-exporter" instance="172.31.1.90:9100" job="node-exporter" namespace="default" pod="node-exporter-dfktp" service="node-exporter" 14.268s ago 35.5ms
http://172.31.17.244:9100/metrics
up container="node-exporter" endpoint="node-exporter" instance="172.31.17.244:9100" job="node-exporter" namespace="default" pod="node-exporter-bl2r6" service="node-exporter" 8.316s ago 25.51ms
http://172.31.23.131:9100/metrics
up container="node-exporter" endpoint="node-exporter" instance="172.31.23.131:9100" job="node-exporter" namespace="default" pod="node-exporter-d4xgv" service="node-exporter" 11.707s ago 25.46ms
http://172.31.33.232:9100/metrics
up container="node-exporter" endpoint="node-exporter" instance="172.31.33.232:9100" job="node-exporter" namespace="default" pod="node-exporter-5wctx" service="node-exporter" 11.133s ago 44.76ms
http://172.31.33.69:9100/metrics
up container="node-exporter" endpoint="node-exporter" instance="172.31.33.69:9100" job="node-exporter" namespace="default" pod="node-exporter-8jzs8" service="node-exporter" 3.177s ago 112ms
http://172.31.35.209:9100/metrics
up container="node-exporter" endpoint="node-exporter" instance="172.31.35.209:9100" job="node-exporter" namespace="default" pod="node-exporter-qpt6g" service="node-exporter" 3.022s ago 24.25ms
http://172.31.49.218:9100/metrics
up container="node-exporter" endpoint="node-exporter" instance="172.31.49.218:9100" job="node-exporter" namespace="default" pod="node-exporter-7794f" service="node-exporter" 3.731s ago 25.32ms
http://172.31.50.147:9100/metrics
up container="node-exporter" endpoint="node-exporter" instance="172.31.50.147:9100" job="node-exporter" namespace="default" pod="node-exporter-ftwlh" service="node-exporter" 6.808s ago 28.97ms
http://172.31.66.137:9100/metrics
up container="node-exporter" endpoint="node-exporter" instance="172.31.66.137:9100" job="node-exporter" namespace="default" pod="node-exporter-vzq2w" service="node-exporter" 7.837s ago 20.08ms
http://172.31.86.118:9100/metrics
up container="node-exporter" endpoint="node-exporter" instance="172.31.86.118:9100" job="node-exporter" namespace="default" pod="node-exporter-5plr5" service="node-exporter" 13.323s ago 22.2ms
http://172.31.88.32:9100/metrics
up container="node-exporter" endpoint="node-exporter" instance="172.31.88.32:9100" job="node-exporter" namespace="default" pod="node-exporter-kfrjl" service="node-exporter" 2.834s ago 23.36ms
http://172.31.95.234:9100/metrics
up container="node-exporter" endpoint="node-exporter" instance="172.31.95.234:9100" job="node-exporter" namespace="default" pod="node-exporter-j9n22" service="node-exporter" 14.138s ago 24.33ms