fix: get namespace from file for Kubernetes storage client

Signed-off-by: m.nabokikh <maksim.nabokikh@flant.com>

parent baec4f79ce, commit 0754c30ac2
2 changed files with 163 additions and 34 deletions
@@ -307,10 +307,10 @@ func newClient(cluster k8sapi.Cluster, user k8sapi.AuthInfo, namespace string, l
 	var t http.RoundTripper
 	httpTransport := &http.Transport{
 		Proxy: http.ProxyFromEnvironment,
-		Dial: (&net.Dialer{
+		DialContext: (&net.Dialer{
 			Timeout:   30 * time.Second,
 			KeepAlive: 30 * time.Second,
-		}).Dial,
+		}).DialContext,
 		TLSClientConfig:       tlsConfig,
 		TLSHandshakeTimeout:   10 * time.Second,
 		ExpectContinueTimeout: 1 * time.Second,
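The only change in this hunk is switching the transport from the deprecated Dial hook to DialContext, so dials can observe request cancellation and deadlines. The sketch below is not part of the commit; it only shows the resulting transport shape in isolation, with tlsConfig standing in for the TLS configuration that newClient builds earlier.

    package main

    import (
        "crypto/tls"
        "fmt"
        "net"
        "net/http"
        "time"
    )

    // newHTTPClient mirrors the transport settings from the hunk above.
    // DialContext (unlike the deprecated Dial) receives the request context,
    // so an in-flight dial is abandoned when the request is cancelled.
    func newHTTPClient(tlsConfig *tls.Config) *http.Client {
        return &http.Client{
            Transport: &http.Transport{
                Proxy: http.ProxyFromEnvironment,
                DialContext: (&net.Dialer{
                    Timeout:   30 * time.Second,
                    KeepAlive: 30 * time.Second,
                }).DialContext,
                TLSClientConfig:       tlsConfig,
                TLSHandshakeTimeout:   10 * time.Second,
                ExpectContinueTimeout: 1 * time.Second,
            },
        }
    }

    func main() {
        client := newHTTPClient(&tls.Config{})
        fmt.Printf("%T\n", client.Transport)
    }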
@@ -423,34 +423,76 @@ func namespaceFromServiceAccountJWT(s string) (string, error) {
 	return data.Namespace, nil
 }
 
-func inClusterConfig() (cluster k8sapi.Cluster, user k8sapi.AuthInfo, namespace string, err error) {
-	host, port := os.Getenv("KUBERNETES_SERVICE_HOST"), os.Getenv("KUBERNETES_SERVICE_PORT")
+func namespaceFromFile(path string) (string, error) {
+	data, err := ioutil.ReadFile(path)
+	if err != nil {
+		return "", err
+	}
+
+	return string(data), nil
+}
+
+func getInClusterConfigNamespace(token, namespaceENV, namespacePath string) (string, error) {
+	namespace := os.Getenv(namespaceENV)
+	if namespace != "" {
+		return namespace, nil
+	}
+
+	namespace, err := namespaceFromServiceAccountJWT(token)
+	if err == nil {
+		return namespace, nil
+	}
+
+	err = fmt.Errorf("inspect service account token: %v", err)
+	namespace, fileErr := namespaceFromFile(namespacePath)
+	if fileErr == nil {
+		return namespace, nil
+	}
+
+	return "", fmt.Errorf("%v: trying to get namespace from file: %v", err, fileErr)
+}
+
+func inClusterConfig() (k8sapi.Cluster, k8sapi.AuthInfo, string, error) {
+	const (
+		serviceAccountPath          = "/var/run/secrets/kubernetes.io/serviceaccount/"
+		serviceAccountTokenPath     = serviceAccountPath + "token"
+		serviceAccountCAPath        = serviceAccountPath + "ca.crt"
+		serviceAccountNamespacePath = serviceAccountPath + "namespace"
+
+		kubernetesServiceHostENV  = "KUBERNETES_SERVICE_HOST"
+		kubernetesServicePortENV  = "KUBERNETES_SERVICE_PORT"
+		kubernetesPodNamespaceENV = "KUBERNETES_POD_NAMESPACE"
+	)
+
+	host, port := os.Getenv(kubernetesServiceHostENV), os.Getenv(kubernetesServicePortENV)
 	if len(host) == 0 || len(port) == 0 {
-		err = fmt.Errorf("unable to load in-cluster configuration, KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT must be defined")
-		return
+		return k8sapi.Cluster{}, k8sapi.AuthInfo{}, "", fmt.Errorf(
+			"unable to load in-cluster configuration, %s and %s must be defined",
+			kubernetesServiceHostENV,
+			kubernetesServicePortENV,
+		)
 	}
 	// we need to wrap IPv6 addresses in square brackets
 	// IPv4 also works with square brackets
 	host = "[" + host + "]"
-	cluster = k8sapi.Cluster{
+	cluster := k8sapi.Cluster{
 		Server:               "https://" + host + ":" + port,
-		CertificateAuthority: "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt",
+		CertificateAuthority: serviceAccountCAPath,
 	}
-	token, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/token")
+
+	token, err := ioutil.ReadFile(serviceAccountTokenPath)
 	if err != nil {
-		return
+		return cluster, k8sapi.AuthInfo{}, "", err
 	}
-	user = k8sapi.AuthInfo{Token: string(token)}
 
-	if namespace = os.Getenv("KUBERNETES_POD_NAMESPACE"); namespace == "" {
-		namespace, err = namespaceFromServiceAccountJWT(user.Token)
-		if err != nil {
-			err = fmt.Errorf("failed to inspect service account token: %v", err)
-			return
-		}
-	}
-
-	return
+	user := k8sapi.AuthInfo{Token: string(token)}
+
+	namespace, err := getInClusterConfigNamespace(user.Token, kubernetesPodNamespaceENV, serviceAccountNamespacePath)
+	if err != nil {
+		return cluster, user, "", err
+	}
+
+	return cluster, user, namespace, nil
 }
 
 func currentContext(config *k8sapi.Config) (cluster k8sapi.Cluster, user k8sapi.AuthInfo, ns string, err error) {
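getInClusterConfigNamespace, added above, resolves the namespace in a fixed order: the KUBERNETES_POD_NAMESPACE environment variable wins, then the namespace claim inside the service account JWT, and finally the serviceaccount namespace file that Kubernetes mounts into every pod; only when all three fail does it return an error combining both failure causes. The standalone sketch below is not part of the commit — resolveNamespace and the fromToken callback are hypothetical stand-ins — it just restates that precedence in a runnable form.

    package main

    import (
        "fmt"
        "io/ioutil"
        "os"
    )

    // resolveNamespace restates the fallback order used by the new helper:
    // 1. environment variable, 2. service account token claim, 3. mounted file.
    func resolveNamespace(envVar, namespaceFile string, fromToken func() (string, error)) (string, error) {
        if ns := os.Getenv(envVar); ns != "" {
            return ns, nil
        }
        if ns, err := fromToken(); err == nil {
            return ns, nil
        }
        data, err := ioutil.ReadFile(namespaceFile)
        if err != nil {
            return "", fmt.Errorf("namespace not found in env, token, or file: %v", err)
        }
        return string(data), nil
    }

    func main() {
        ns, err := resolveNamespace(
            "KUBERNETES_POD_NAMESPACE",
            "/var/run/secrets/kubernetes.io/serviceaccount/namespace",
            func() (string, error) { return "", fmt.Errorf("no usable token") },
        )
        fmt.Println(ns, err)
    }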
@@ -461,7 +503,7 @@ func currentContext(config *k8sapi.Config) (cluster k8sapi.Cluster, user k8sapi.
 			return cluster, user, "", errors.New("kubeconfig has no current context")
 		}
 	}
-	context, ok := func() (k8sapi.Context, bool) {
+	k8sContext, ok := func() (k8sapi.Context, bool) {
 		for _, namedContext := range config.Contexts {
 			if namedContext.Name == config.CurrentContext {
 				return namedContext.Context, true
@@ -475,26 +517,26 @@ func currentContext(config *k8sapi.Config) (cluster k8sapi.Cluster, user k8sapi.
 
 	cluster, ok = func() (k8sapi.Cluster, bool) {
 		for _, namedCluster := range config.Clusters {
-			if namedCluster.Name == context.Cluster {
+			if namedCluster.Name == k8sContext.Cluster {
 				return namedCluster.Cluster, true
 			}
 		}
 		return k8sapi.Cluster{}, false
 	}()
 	if !ok {
-		return cluster, user, "", fmt.Errorf("no cluster named %q found", context.Cluster)
+		return cluster, user, "", fmt.Errorf("no cluster named %q found", k8sContext.Cluster)
 	}
 
 	user, ok = func() (k8sapi.AuthInfo, bool) {
 		for _, namedAuthInfo := range config.AuthInfos {
-			if namedAuthInfo.Name == context.AuthInfo {
+			if namedAuthInfo.Name == k8sContext.AuthInfo {
 				return namedAuthInfo.AuthInfo, true
 			}
 		}
 		return k8sapi.AuthInfo{}, false
 	}()
 	if !ok {
-		return cluster, user, "", fmt.Errorf("no user named %q found", context.AuthInfo)
+		return cluster, user, "", fmt.Errorf("no user named %q found", k8sContext.AuthInfo)
 	}
-	return cluster, user, context.Namespace, nil
+	return cluster, user, k8sContext.Namespace, nil
 }
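The two currentContext hunks are a mechanical rename of the local variable context to k8sContext. The commit does not say why; one plausible motivation (an assumption, not stated in the diff) is that a local variable named context shadows the standard context package identifier from its declaration onward, which is easy to trip over in Go. The hypothetical snippet below illustrates that shadowing.

    package main

    import (
        "context"
        "fmt"
    )

    type k8sContext struct{ Cluster string }

    func main() {
        ctx := context.Background() // the package identifier is still visible here

        // From this declaration on, "context" names the local variable, so
        // context.Background() would no longer compile in this scope.
        context := k8sContext{Cluster: "in-cluster"}

        fmt.Println(ctx, context.Cluster)
    }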
(second changed file: the package's tests)

@@ -3,8 +3,12 @@ package kubernetes
 import (
 	"hash"
 	"hash/fnv"
+	"io/ioutil"
+	"os"
 	"sync"
 	"testing"
+
+	"github.com/stretchr/testify/require"
 )
 
 // This test does not have an explicit error condition but is used
@@ -42,18 +46,101 @@ func TestOfflineTokenName(t *testing.T) {
 	}
 }
 
-func TestNamespaceFromServiceAccountJWT(t *testing.T) {
-	namespace, err := namespaceFromServiceAccountJWT(serviceAccountToken)
-	if err != nil {
-		t.Fatal(err)
+func TestGetClusterConfigNamespace(t *testing.T) {
+	const namespaceENVVariableName = "TEST_GET_CLUSTER_CONFIG_NAMESPACE"
+	{
+		os.Setenv(namespaceENVVariableName, "namespace-from-env")
+		defer os.Unsetenv(namespaceENVVariableName)
 	}
-	wantNamespace := "dex-test-namespace"
-	if namespace != wantNamespace {
-		t.Errorf("expected namespace %q got %q", wantNamespace, namespace)
+
+	var namespaceFile string
+	{
+		tmpfile, err := ioutil.TempFile(os.TempDir(), "test-get-cluster-config-namespace")
+		require.NoError(t, err)
+
+		_, err = tmpfile.Write([]byte("namespace-from-file"))
+		require.NoError(t, err)
+
+		namespaceFile = tmpfile.Name()
+		defer os.Remove(namespaceFile)
+	}
+
+	tests := []struct {
+		name        string
+		token       string
+		fileName    string
+		envVariable string
+
+		expectedError     bool
+		expectedNamespace string
+	}{
+		{
+			name:        "With env variable",
+			envVariable: "TEST_GET_CLUSTER_CONFIG_NAMESPACE",
+
+			expectedNamespace: "namespace-from-env",
+		},
+		{
+			name:  "With token",
+			token: serviceAccountToken,
+
+			expectedNamespace: "dex-test-namespace",
+		},
+		{
+			name:     "With namespace file",
+			fileName: namespaceFile,
+
+			expectedNamespace: "namespace-from-file",
+		},
+		{
+			name:     "With file and token",
+			fileName: namespaceFile,
+			token:    serviceAccountToken,
+
+			expectedNamespace: "dex-test-namespace",
+		},
+		{
+			name:        "With file and env",
+			fileName:    namespaceFile,
+			envVariable: "TEST_GET_CLUSTER_CONFIG_NAMESPACE",
+
+			expectedNamespace: "namespace-from-env",
+		},
+		{
+			name:        "With token and env",
+			envVariable: "TEST_GET_CLUSTER_CONFIG_NAMESPACE",
+			token:       serviceAccountToken,
+
+			expectedNamespace: "namespace-from-env",
+		},
+		{
+			name:        "With file, token and env",
+			fileName:    namespaceFile,
+			token:       serviceAccountToken,
+			envVariable: "TEST_GET_CLUSTER_CONFIG_NAMESPACE",
+
+			expectedNamespace: "namespace-from-env",
+		},
+		{
+			name: "Without anything",
+			expectedError: true,
+		},
+	}
+
+	for _, tc := range tests {
+		t.Run(tc.name, func(t *testing.T) {
+			namespace, err := getInClusterConfigNamespace(tc.token, tc.envVariable, tc.fileName)
+			if tc.expectedError {
+				require.Error(t, err)
+			} else {
+				require.NoError(t, err)
+			}
+			require.Equal(t, namespace, tc.expectedNamespace)
+		})
 	}
 }
 
-var serviceAccountToken = "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJkZXgtdGVzdC1uYW1lc3BhY2UiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlY3JldC5uYW1lIjoiZG90aGVyb2JvdC1zZWNyZXQiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoiZG90aGVyb2JvdCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6IjQyYjJhOTRmLTk4MjAtMTFlNi1iZDc0LTJlZmQzOGYxMjYxYyIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDpkZXgtdGVzdC1uYW1lc3BhY2U6ZG90aGVyb2JvdCJ9.KViBpPwCiBwxDvAjYUUXoVvLVwqV011aLlYQpNtX12Bh8M-QAFch-3RWlo_SR00bcdFg_nZo9JKACYlF_jHMEsf__PaYms9r7vEaSg0jPfkqnL2WXZktzQRyLBr0n-bxeUrbwIWsKOAC0DfFB5nM8XoXljRmq8yAx8BAdmQp7MIFb4EOV9nYthhua6pjzYyaFSiDiYTjw7HtXOvoL8oepodJ3-37pUKS8vdBvnvUoqC4M1YAhkO5L36JF6KV_RfmG8GPEdNQfXotHcsR-3jKi1n8S5l7Xd-rhrGOhSGQizH3dORzo9GvBAhYeqbq1O-NLzm2EQUiMQayIUx7o4g3Kw"
+const serviceAccountToken = "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJkZXgtdGVzdC1uYW1lc3BhY2UiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlY3JldC5uYW1lIjoiZG90aGVyb2JvdC1zZWNyZXQiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoiZG90aGVyb2JvdCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6IjQyYjJhOTRmLTk4MjAtMTFlNi1iZDc0LTJlZmQzOGYxMjYxYyIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDpkZXgtdGVzdC1uYW1lc3BhY2U6ZG90aGVyb2JvdCJ9.KViBpPwCiBwxDvAjYUUXoVvLVwqV011aLlYQpNtX12Bh8M-QAFch-3RWlo_SR00bcdFg_nZo9JKACYlF_jHMEsf__PaYms9r7vEaSg0jPfkqnL2WXZktzQRyLBr0n-bxeUrbwIWsKOAC0DfFB5nM8XoXljRmq8yAx8BAdmQp7MIFb4EOV9nYthhua6pjzYyaFSiDiYTjw7HtXOvoL8oepodJ3-37pUKS8vdBvnvUoqC4M1YAhkO5L36JF6KV_RfmG8GPEdNQfXotHcsR-3jKi1n8S5l7Xd-rhrGOhSGQizH3dORzo9GvBAhYeqbq1O-NLzm2EQUiMQayIUx7o4g3Kw"
 
 // The following program was used to generate the example token. Since we don't want to
 // import Kubernetes, just leave it as a comment.
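One detail worth noting about the new test: namespaceFromFile returns the file contents verbatim, which is why the test writes "namespace-from-file" with no trailing newline; the namespace file Kubernetes mounts under /var/run/secrets/kubernetes.io/serviceaccount/ normally has no trailing newline either. The sketch below is not part of the commit; it shows what would happen if such a file did end in a newline, and how trimming would guard against it.

    package main

    import (
        "fmt"
        "io/ioutil"
        "os"
        "strings"
    )

    func main() {
        f, err := ioutil.TempFile("", "namespace")
        if err != nil {
            panic(err)
        }
        defer os.Remove(f.Name())

        // Write a namespace followed by a newline, unlike the real mounted file.
        if _, err := f.WriteString("default\n"); err != nil {
            panic(err)
        }

        data, err := ioutil.ReadFile(f.Name())
        if err != nil {
            panic(err)
        }

        fmt.Printf("verbatim: %q\n", string(data))                    // "default\n" would end up in the namespace string
        fmt.Printf("trimmed:  %q\n", strings.TrimSpace(string(data))) // "default"
    }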