Mirror of https://github.com/edgelesssys/constellation.git (synced 2024-12-24 23:19:39 -05:00)
pre v2.15 cleanup (#2871)

parent 3799525103
commit 489e07677e
@@ -64,14 +64,6 @@ func TerraformIAMUpgradeVars(conf *config.Config, fileHandler file.Handler) (ter
         if err := terraform.VariablesFromBytes(oldVarBytes, &oldVars); err != nil {
             return nil, fmt.Errorf("parsing existing IAM workspace: %w", err)
         }
-
-        // Migration from the "region" to the "location" field name.
-        // TODO(msanft): Remove after v2.14.0 is released.
-        if oldVars.Region != nil && *oldVars.Region != "" && oldVars.Location == "" {
-            oldVars.Location = *oldVars.Region
-            oldVars.Region = nil
-        }
-
         vars = azureTerraformIAMVars(conf, oldVars)
     case cloudprovider.GCP:
         var oldVars terraform.GCPIAMVariables
@@ -245,11 +245,8 @@ type AzureNodeGroup struct

 // AzureIAMVariables is user configuration for creating the IAM configuration with Terraform on Microsoft Azure.
 type AzureIAMVariables struct {
-    // Region is the Azure location to use. (e.g. westus).
-    // THIS FIELD IS DEPRECATED AND ONLY KEPT FOR MIGRATION PURPOSES. DO NOT USE.
-    Region *string `hcl:"region" cty:"region"` // TODO(msanft): Remove this field once v2.14.0 is released.
     // Location is the Azure location to use. (e.g. westus)
-    Location string `hcl:"location,optional" cty:"location"` // TODO(msanft): Make this required once v2.14.0 is released.
+    Location string `hcl:"location" cty:"location"`
     // ServicePrincipal is the name of the service principal to use.
     ServicePrincipal string `hcl:"service_principal_name" cty:"service_principal_name"`
     // ResourceGroup is the name of the resource group to use.
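The two hunks above retire the v2.13-to-v2.14 migration shim that mapped the deprecated Azure "region" variable onto "location" while parsing an existing IAM workspace, and they make the location field required again. A minimal, self-contained sketch of that shim pattern, using an illustrative legacyIAMVariables stand-in rather than the project's terraform.AzureIAMVariables type:

    package main

    import "fmt"

    // legacyIAMVariables is a stand-in for a variables struct that still carries
    // a deprecated field alongside its replacement.
    type legacyIAMVariables struct {
        Region   *string // deprecated spelling, kept only so old files still parse
        Location string  // replacement field
    }

    // migrateRegionToLocation copies a non-empty deprecated Region value into
    // Location if Location was not set explicitly, then clears Region.
    func migrateRegionToLocation(v *legacyIAMVariables) {
        if v.Region != nil && *v.Region != "" && v.Location == "" {
            v.Location = *v.Region
            v.Region = nil
        }
    }

    func main() {
        region := "westus"
        vars := legacyIAMVariables{Region: &region}
        migrateRegionToLocation(&vars)
        fmt.Println(vars.Location) // prints "westus"
    }

With the shim gone, pre-v2.14 variable files that only set region are no longer rewritten on the fly.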
@@ -120,8 +120,7 @@ func (a actionFactory) appendNewAction(
     } else {
         // This may break for external chart dependencies if we decide to upgrade more than one minor version at a time.
         if err := newVersion.IsUpgradeTo(currentVersion); err != nil {
-            // TODO(3u13r): Remove when Constellation v2.14 is released.
-            // We need to ignore that we jump from Cilium v1.12 to v1.15-pre. We have verified that this works.
+            // Allow bigger Cilium and Cert-Manager version jumps.
             if !(errors.Is(err, compatibility.ErrMinorDrift) && (release.releaseName == "cilium" || release.releaseName == "cert-manager")) {
                 return fmt.Errorf("invalid upgrade for %s: %w", release.releaseName, err)
             }
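The rewritten comment above keeps the exception that tolerates larger minor-version jumps for the cilium and cert-manager charts. A self-contained sketch of that errors.Is-plus-allowlist pattern, with illustrative names (errMinorDrift and checkUpgrade stand in for compatibility.ErrMinorDrift and the actual actionFactory logic):

    package main

    import (
        "errors"
        "fmt"
    )

    // errMinorDrift is a stand-in for a sentinel error a compatibility check
    // might return when two versions are more than one minor release apart.
    var errMinorDrift = errors.New("versions differ by more than one minor version")

    // allowedDrift lists releases for which a larger minor-version jump is tolerated.
    var allowedDrift = map[string]bool{"cilium": true, "cert-manager": true}

    // checkUpgrade rejects a version-drift error unless the release is on the allowlist.
    func checkUpgrade(release string, compatErr error) error {
        if compatErr == nil {
            return nil
        }
        if errors.Is(compatErr, errMinorDrift) && allowedDrift[release] {
            return nil // known-good exception, verified manually
        }
        return fmt.Errorf("invalid upgrade for %s: %w", release, compatErr)
    }

    func main() {
        fmt.Println(checkUpgrade("cilium", errMinorDrift))        // <nil>
        fmt.Println(checkUpgrade("nginx-ingress", errMinorDrift)) // invalid upgrade ...
    }

Wrapping the original error with %w keeps it available to callers that inspect the cause with errors.Is or errors.As.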
@@ -66,7 +66,6 @@ go_test(
         "@io_k8s_apimachinery//pkg/apis/meta/v1/unstructured",
         "@io_k8s_apimachinery//pkg/runtime",
         "@io_k8s_apimachinery//pkg/runtime/schema",
-        "@io_k8s_kubernetes//cmd/kubeadm/app/apis/kubeadm/v1beta3",
         "@io_k8s_sigs_yaml//:yaml",
     ],
 )
@@ -21,7 +21,6 @@ import (
     "encoding/json"
     "errors"
     "fmt"
-    "slices"
     "sort"
     "strings"
     "time"
@@ -104,11 +103,6 @@ func (k *KubeCmd) UpgradeNodeImage(ctx context.Context, imageVersion semver.Semv
         return fmt.Errorf("updating image version: %w", err)
     }

-    // TODO(3u13r): remove `reconcileKubeadmConfigMap` after v2.14.0 has been released.
-    if err := k.reconcileKubeadmConfigMap(ctx); err != nil {
-        return fmt.Errorf("reconciling kubeadm config: %w", err)
-    }
-
     k.log.Debugf("Updating local copy of nodeVersion image version from %s to %s", nodeVersion.Spec.ImageVersion, imageVersion.String())
     nodeVersion.Spec.ImageReference = imageReference
     nodeVersion.Spec.ImageVersion = imageVersion.String()
@@ -383,44 +377,6 @@ func (k *KubeCmd) applyNodeVersion(ctx context.Context, nodeVersion updatev1alph
     return updatedNodeVersion, err
 }
-
-func (k *KubeCmd) reconcileKubeadmConfigMap(ctx context.Context) error {
-    clusterConfiguration, kubeadmConfig, err := k.getClusterConfiguration(ctx)
-    if err != nil {
-        return fmt.Errorf("getting ClusterConfig: %w", err)
-    }
-
-    for i, v := range clusterConfiguration.APIServer.ExtraVolumes {
-        if v.Name == "konnectivity-uds" {
-            clusterConfiguration.APIServer.ExtraVolumes = slices.Delete(clusterConfiguration.APIServer.ExtraVolumes, i, i+1)
-        }
-    }
-    for i, v := range clusterConfiguration.APIServer.ExtraVolumes {
-        if v.Name == "egress-config" {
-            clusterConfiguration.APIServer.ExtraVolumes = slices.Delete(clusterConfiguration.APIServer.ExtraVolumes, i, i+1)
-        }
-    }
-    delete(clusterConfiguration.APIServer.ExtraArgs, "egress-selector-config-file")
-
-    newConfigYAML, err := yaml.Marshal(clusterConfiguration)
-    if err != nil {
-        return fmt.Errorf("marshaling ClusterConfiguration: %w", err)
-    }
-
-    if kubeadmConfig.Data[constants.ClusterConfigurationKey] == string(newConfigYAML) {
-        k.log.Debugf("No changes to kubeadm config required")
-        return nil
-    }
-
-    kubeadmConfig.Data[constants.ClusterConfigurationKey] = string(newConfigYAML)
-    k.log.Debugf("Triggering kubeadm config update now")
-    if _, err = k.kubectl.UpdateConfigMap(ctx, kubeadmConfig); err != nil {
-        return fmt.Errorf("setting new kubeadm config: %w", err)
-    }
-
-    k.log.Debugf("Successfully reconciled the cluster's kubeadm config")
-    return nil
-}

 // isValidImageUpdate checks if the new image version is a valid upgrade, and there is no upgrade already running.
 func (k *KubeCmd) isValidImageUpgrade(nodeVersion updatev1alpha1.NodeVersion, newImageVersion string, force bool) error {
     if !force {
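The deleted reconcileKubeadmConfigMap helper stripped the Konnectivity volumes and the egress-selector-config-file argument from the kubeadm ClusterConfiguration and wrote the result back to the ConfigMap. A small, self-contained sketch of the slice-filtering step with a stand-in type (hostPathMount and the audit-log entry are illustrative only, not the kubeadm API):

    package main

    import (
        "fmt"
        "slices"
    )

    // hostPathMount is a stand-in for the kubeadm HostPathMount type: just
    // enough structure to show how named API-server volumes can be filtered out.
    type hostPathMount struct {
        Name     string
        HostPath string
    }

    func main() {
        extraVolumes := []hostPathMount{
            {Name: "egress-config", HostPath: "/etc/kubernetes/egress-selector-config-file.yaml"},
            {Name: "konnectivity-uds", HostPath: "/some/path/to/konnectivity-uds"},
            {Name: "audit-log", HostPath: "/var/log/kubernetes/audit"},
        }

        // Drop the Konnectivity-related volumes in one pass.
        extraVolumes = slices.DeleteFunc(extraVolumes, func(v hostPathMount) bool {
            return v.Name == "konnectivity-uds" || v.Name == "egress-config"
        })

        fmt.Println(extraVolumes) // [{audit-log /var/log/kubernetes/audit}]
    }

slices.DeleteFunc (Go 1.21+) is used in the sketch instead of the index-based slices.Delete calls from the removed code, which sidesteps shrinking the slice while ranging over it.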
@@ -11,7 +11,6 @@ import (
     "encoding/json"
     "errors"
     "fmt"
-    "strings"
     "testing"
     "time"

@@ -34,80 +33,21 @@ import (
     "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
     "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/apimachinery/pkg/runtime/schema"
-    kubeadmv1beta3 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
 )

 func TestUpgradeNodeImage(t *testing.T) {
-    clusterConf := kubeadmv1beta3.ClusterConfiguration{
-        APIServer: kubeadmv1beta3.APIServer{
-            ControlPlaneComponent: kubeadmv1beta3.ControlPlaneComponent{
-                ExtraArgs:    map[string]string{},
-                ExtraVolumes: []kubeadmv1beta3.HostPathMount{},
-            },
-        },
-    }
-
-    clusterConfBytes, err := json.Marshal(clusterConf)
-    require.NoError(t, err)
-    validKubeadmConfig := &corev1.ConfigMap{
-        ObjectMeta: metav1.ObjectMeta{
-            Name: constants.KubeadmConfigMap,
-        },
-        Data: map[string]string{
-            constants.ClusterConfigurationKey: string(clusterConfBytes),
-        },
-    }
-
-    clusterConfWithKonnectivity := kubeadmv1beta3.ClusterConfiguration{
-        APIServer: kubeadmv1beta3.APIServer{
-            ControlPlaneComponent: kubeadmv1beta3.ControlPlaneComponent{
-                ExtraArgs: map[string]string{
-                    "egress-selector-config-file": "/etc/kubernetes/egress-selector-config-file.yaml",
-                },
-                ExtraVolumes: []kubeadmv1beta3.HostPathMount{
-                    {
-                        Name:     "egress-config",
-                        HostPath: "/etc/kubernetes/egress-selector-config-file.yaml",
-                    },
-                    {
-                        Name:     "konnectivity-uds",
-                        HostPath: "/some/path/to/konnectivity-uds",
-                    },
-                },
-            },
-        },
-    }
-
-    clusterConfBytesWithKonnectivity, err := json.Marshal(clusterConfWithKonnectivity)
-    require.NoError(t, err)
-    validKubeadmConfigWithKonnectivity := &corev1.ConfigMap{
-        ObjectMeta: metav1.ObjectMeta{
-            Name: constants.KubeadmConfigMap,
-        },
-        Data: map[string]string{
-            constants.ClusterConfigurationKey: string(clusterConfBytesWithKonnectivity),
-        },
-    }
-
     testCases := map[string]struct {
         conditions          []metav1.Condition
         currentImageVersion semver.Semver
         newImageVersion     semver.Semver
         badImageVersion     string
         force               bool
-        customKubeadmConfig *corev1.ConfigMap
         getCRErr            error
         wantErr             bool
         wantUpdate          bool
         assertCorrectError  func(t *testing.T, err error) bool
         customClientFn      func(nodeVersion updatev1alpha1.NodeVersion) unstructuredInterface
     }{
-        "success with konnectivity migration": {
-            currentImageVersion: semver.NewFromInt(1, 2, 2, ""),
-            newImageVersion:     semver.NewFromInt(1, 2, 3, ""),
-            customKubeadmConfig: validKubeadmConfigWithKonnectivity,
-            wantUpdate:          true,
-        },
         "success": {
             currentImageVersion: semver.NewFromInt(1, 2, 2, ""),
             newImageVersion:     semver.NewFromInt(1, 2, 3, ""),
@@ -226,15 +166,12 @@ func TestUpgradeNodeImage(t *testing.T) {
             kubectl := &stubKubectl{
                 unstructuredInterface: unstructuredClient,
                 configMaps: map[string]*corev1.ConfigMap{
-                    constants.KubeadmConfigMap: validKubeadmConfig,
+                    constants.JoinConfigMap: newJoinConfigMap(`{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":false}}`),
                 },
             }
             if tc.customClientFn != nil {
                 kubectl.unstructuredInterface = tc.customClientFn(nodeVersion)
             }
-            if tc.customKubeadmConfig != nil {
-                kubectl.configMaps[constants.KubeadmConfigMap] = tc.customKubeadmConfig
-            }

             upgrader := KubeCmd{
                 kubectl: kubectl,
@@ -255,12 +192,6 @@ func TestUpgradeNodeImage(t *testing.T) {
                 return
             }
             assert.NoError(err)
-            // If the ConfigMap only exists in the updatedConfigMaps map, the Konnectivity values should have been removed
-            if strings.Contains(kubectl.configMaps[constants.KubeadmConfigMap].Data[constants.ClusterConfigurationKey], "konnectivity-uds") {
-                assert.NotContains(kubectl.updatedConfigMaps[constants.KubeadmConfigMap].Data[constants.ClusterConfigurationKey], "konnectivity-uds")
-                assert.NotContains(kubectl.updatedConfigMaps[constants.KubeadmConfigMap].Data[constants.ClusterConfigurationKey], "egress-config")
-                assert.NotContains(kubectl.updatedConfigMaps[constants.KubeadmConfigMap].Data[constants.ClusterConfigurationKey], "egress-selector-config-file")
-            }
         })
     }
 }
@@ -76,9 +76,6 @@ type State struct {
     // description: |
     //   Schema version of this state file.
     Version string `yaml:"version"`
-
-    // TODO(msanft): Add link to self-managed infrastructure docs once existing.
-
     // description: |
     //   State of the cluster's cloud resources. These values are retrieved during
     //   cluster creation. In the case of self-managed infrastructure, the marked
@@ -44,22 +44,8 @@ func (c *Client) Upgrade(ctx context.Context, kubernetesComponents components.Co
     }
     defer conn.Close()

-    // While we're transitioning between version 2.13 and 2.14, we need to
-    // expect an upgrade-agent that does not yet understand the
-    // KubernetesComponents proto field. Therefore, we pass the kubeadm
-    // component twice: once via KubeadmUrl/KubeadmHash, once as part of the
-    // kubernetesComponents argument.
-    kubeadm, err := kubernetesComponents.GetKubeadmComponent()
-    if err != nil {
-        return fmt.Errorf("expected a kubeadm Component: %w", err)
-    }
     protoClient := upgradeproto.NewUpdateClient(conn)
     _, err = protoClient.ExecuteUpdate(ctx, &upgradeproto.ExecuteUpdateRequest{
-        // TODO(burgerdev): remove these fields after releasing 2.14.
-        // %< ---------------------------------
-        KubeadmUrl:  kubeadm.Url,
-        KubeadmHash: kubeadm.Hash,
-        // %< ---------------------------------
         WantedKubernetesVersion: WantedKubernetesVersion,
         KubernetesComponents:    kubernetesComponents,
     })
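The comment removed above documents the transition strategy this cleanup retires: during the v2.13 to v2.14 window the CLI sent the kubeadm component twice, so that both old and new upgrade agents could serve the request. A self-contained sketch of that duplication with simplified stand-in types rather than the generated upgradeproto structs (component, updateRequest, and newCompatRequest are illustrative names):

    package main

    import "fmt"

    // component and updateRequest are simplified stand-ins for the proto types;
    // they only carry the fields needed to illustrate the transition.
    type component struct {
        URL, Hash string
    }

    type updateRequest struct {
        // Legacy fields, understood by pre-v2.14 upgrade agents.
        KubeadmURL, KubeadmHash string
        // New field, understood by v2.14+ upgrade agents.
        KubernetesComponents []component
    }

    // newCompatRequest duplicates the kubeadm component into the legacy fields so
    // that both old and new agents can handle the request during the transition.
    func newCompatRequest(kubeadm component, all []component) updateRequest {
        return updateRequest{
            KubeadmURL:           kubeadm.URL,
            KubeadmHash:          kubeadm.Hash,
            KubernetesComponents: all,
        }
    }

    func main() {
        kubeadm := component{URL: "http://example.com/kubeadm", Hash: "sha256:foo"}
        req := newCompatRequest(kubeadm, []component{kubeadm})
        fmt.Printf("%+v\n", req)
    }

With v2.14 as the minimum supported agent, only the KubernetesComponents list is sent, which is exactly what the remaining lines of the hunk do.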
@@ -131,20 +131,8 @@ func prepareUpdate(ctx context.Context, installer osInstaller, updateRequest *up
     if err != nil {
         return err
     }
-
-    var cs components.Components
-    if len(updateRequest.KubeadmUrl) > 0 {
-        cs = append(cs, &components.Component{
-            Url:         updateRequest.KubeadmUrl,
-            Hash:        updateRequest.KubeadmHash,
-            InstallPath: constants.KubeadmPath,
-            Extract:     false,
-        })
-    }
-    cs = append(cs, updateRequest.KubernetesComponents...)
-
     // Download & install the Kubernetes components.
-    for _, c := range cs {
+    for _, c := range updateRequest.KubernetesComponents {
         if err := installer.Install(ctx, c); err != nil {
             return fmt.Errorf("installing Kubernetes component %q: %w", c.Url, err)
         }
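On the agent side, the removed block above was the matching fallback: when a request still carried the legacy KubeadmUrl/KubeadmHash fields, a kubeadm entry was prepended to the component list before installation. A self-contained sketch of that fallback using simplified stand-in types (component, request, buildInstallList, and the install path are illustrative, not the real upgradeproto or constants API):

    package main

    import "fmt"

    // component and request are simplified stand-ins for the proto types used by
    // the upgrade agent; only the fields relevant to the fallback are modeled.
    type component struct {
        URL, Hash, InstallPath string
    }

    type request struct {
        KubeadmURL, KubeadmHash string      // legacy fields, set by pre-v2.14 CLIs
        KubernetesComponents    []component // new-style component list
    }

    // buildInstallList reproduces the retired fallback: if the legacy fields are
    // set, a kubeadm component is prepended to the list that gets installed.
    func buildInstallList(req request) []component {
        var cs []component
        if req.KubeadmURL != "" {
            cs = append(cs, component{
                URL:         req.KubeadmURL,
                Hash:        req.KubeadmHash,
                InstallPath: "/usr/bin/kubeadm", // stand-in for constants.KubeadmPath
            })
        }
        return append(cs, req.KubernetesComponents...)
    }

    func main() {
        old := request{KubeadmURL: "http://example.com/kubeadm", KubeadmHash: "sha256:foo"}
        fmt.Println(len(buildInstallList(old))) // 1: the synthesized kubeadm component
    }

After this cleanup the agent installs updateRequest.KubernetesComponents directly, so requests from pre-v2.14 CLIs are no longer special-cased.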
@@ -62,12 +62,7 @@ func TestPrepareUpdate(t *testing.T) {
     slimUpdateRequest := &upgradeproto.ExecuteUpdateRequest{
         WantedKubernetesVersion: "v1.1.1",
     }
-    oldStyleUpdateRequest := &upgradeproto.ExecuteUpdateRequest{
-        WantedKubernetesVersion: "v1.1.1",
-        KubeadmUrl:              "http://example.com/kubeadm",
-        KubeadmHash:             "sha256:foo",
-    }
-    newStyleUpdateRequest := &upgradeproto.ExecuteUpdateRequest{
+    updateRequest := &upgradeproto.ExecuteUpdateRequest{
         WantedKubernetesVersion: "v1.1.1",
         KubernetesComponents: []*components.Component{
             {
@@ -79,8 +74,6 @@
     }
     combinedStyleUpdateRequest := &upgradeproto.ExecuteUpdateRequest{
         WantedKubernetesVersion: "v1.1.1",
-        KubeadmUrl:              "http://example.com/kubeadm",
-        KubeadmHash:             "sha256:foo",
         KubernetesComponents: []*components.Component{
             {
                 Url: "data:application/octet-stream,foo",
@@ -104,16 +97,16 @@
         },
         "install error": {
             installer:     stubOsInstaller{InstallErr: fmt.Errorf("install error")},
-            updateRequest: oldStyleUpdateRequest,
+            updateRequest: updateRequest,
             wantErr:       true,
         },
         "new style works": {
             installer:     stubOsInstaller{},
-            updateRequest: newStyleUpdateRequest,
+            updateRequest: updateRequest,
         },
         "new style install error": {
             installer:     stubOsInstaller{InstallErr: fmt.Errorf("install error")},
-            updateRequest: newStyleUpdateRequest,
+            updateRequest: updateRequest,
             wantErr:       true,
         },
         "combined style works": {
@@ -30,8 +30,6 @@ type ExecuteUpdateRequest struct {
     sizeCache     protoimpl.SizeCache
     unknownFields protoimpl.UnknownFields

-    KubeadmUrl              string                  `protobuf:"bytes,1,opt,name=kubeadm_url,json=kubeadmUrl,proto3" json:"kubeadm_url,omitempty"`
-    KubeadmHash             string                  `protobuf:"bytes,2,opt,name=kubeadm_hash,json=kubeadmHash,proto3" json:"kubeadm_hash,omitempty"`
     WantedKubernetesVersion string                  `protobuf:"bytes,3,opt,name=wanted_kubernetes_version,json=wantedKubernetesVersion,proto3" json:"wanted_kubernetes_version,omitempty"`
     KubernetesComponents    []*components.Component `protobuf:"bytes,4,rep,name=kubernetes_components,json=kubernetesComponents,proto3" json:"kubernetes_components,omitempty"`
 }
@@ -68,20 +66,6 @@ func (*ExecuteUpdateRequest) Descriptor() ([]byte, []int) {
     return file_upgrade_agent_upgradeproto_upgrade_proto_rawDescGZIP(), []int{0}
 }
-
-func (x *ExecuteUpdateRequest) GetKubeadmUrl() string {
-    if x != nil {
-        return x.KubeadmUrl
-    }
-    return ""
-}
-
-func (x *ExecuteUpdateRequest) GetKubeadmHash() string {
-    if x != nil {
-        return x.KubeadmHash
-    }
-    return ""
-}

 func (x *ExecuteUpdateRequest) GetWantedKubernetesVersion() string {
     if x != nil {
         return x.WantedKubernetesVersion
@@ -143,33 +127,31 @@ var file_upgrade_agent_upgradeproto_upgrade_proto_rawDesc = []byte{
     0x61, 0x64, 0x65, 0x1a, 0x2d, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x76, 0x65,
     0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74,
     0x73, 0x2f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x70, 0x72, 0x6f,
-    0x74, 0x6f, 0x22, 0xe2, 0x01, 0x0a, 0x14, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x55, 0x70,
-    0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x6b,
-    0x75, 0x62, 0x65, 0x61, 0x64, 0x6d, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
-    0x52, 0x0a, 0x6b, 0x75, 0x62, 0x65, 0x61, 0x64, 0x6d, 0x55, 0x72, 0x6c, 0x12, 0x21, 0x0a, 0x0c,
-    0x6b, 0x75, 0x62, 0x65, 0x61, 0x64, 0x6d, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01,
-    0x28, 0x09, 0x52, 0x0b, 0x6b, 0x75, 0x62, 0x65, 0x61, 0x64, 0x6d, 0x48, 0x61, 0x73, 0x68, 0x12,
-    0x3a, 0x0a, 0x19, 0x77, 0x61, 0x6e, 0x74, 0x65, 0x64, 0x5f, 0x6b, 0x75, 0x62, 0x65, 0x72, 0x6e,
-    0x65, 0x74, 0x65, 0x73, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01,
-    0x28, 0x09, 0x52, 0x17, 0x77, 0x61, 0x6e, 0x74, 0x65, 0x64, 0x4b, 0x75, 0x62, 0x65, 0x72, 0x6e,
-    0x65, 0x74, 0x65, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x4a, 0x0a, 0x15, 0x6b,
-    0x75, 0x62, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e,
-    0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x63, 0x6f, 0x6d,
-    0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e,
-    0x74, 0x52, 0x14, 0x6b, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6d,
-    0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x17, 0x0a, 0x15, 0x45, 0x78, 0x65, 0x63, 0x75,
-    0x74, 0x65, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
-    0x32, 0x58, 0x0a, 0x06, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x4e, 0x0a, 0x0d, 0x45, 0x78,
-    0x65, 0x63, 0x75, 0x74, 0x65, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x1d, 0x2e, 0x75, 0x70,
-    0x67, 0x72, 0x61, 0x64, 0x65, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x55, 0x70, 0x64,
-    0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x75, 0x70, 0x67,
-    0x72, 0x61, 0x64, 0x65, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x55, 0x70, 0x64, 0x61,
-    0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x44, 0x5a, 0x42, 0x67, 0x69,
-    0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x64, 0x67, 0x65, 0x6c, 0x65, 0x73,
-    0x73, 0x73, 0x79, 0x73, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x65, 0x6c, 0x6c, 0x61, 0x74, 0x69,
-    0x6f, 0x6e, 0x2f, 0x76, 0x32, 0x2f, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x2d, 0x61, 0x67,
-    0x65, 0x6e, 0x74, 0x2f, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x70, 0x72, 0x6f, 0x74, 0x6f,
-    0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+    0x74, 0x6f, 0x22, 0xc5, 0x01, 0x0a, 0x14, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x55, 0x70,
+    0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3a, 0x0a, 0x19, 0x77,
+    0x61, 0x6e, 0x74, 0x65, 0x64, 0x5f, 0x6b, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x65, 0x73,
+    0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x17,
+    0x77, 0x61, 0x6e, 0x74, 0x65, 0x64, 0x4b, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x65, 0x73,
+    0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x4a, 0x0a, 0x15, 0x6b, 0x75, 0x62, 0x65, 0x72,
+    0x6e, 0x65, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73,
+    0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65,
+    0x6e, 0x74, 0x73, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x52, 0x14, 0x6b,
+    0x75, 0x62, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65,
+    0x6e, 0x74, 0x73, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52,
+    0x0b, 0x6b, 0x75, 0x62, 0x65, 0x61, 0x64, 0x6d, 0x5f, 0x75, 0x72, 0x6c, 0x52, 0x0c, 0x6b, 0x75,
+    0x62, 0x65, 0x61, 0x64, 0x6d, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x22, 0x17, 0x0a, 0x15, 0x45, 0x78,
+    0x65, 0x63, 0x75, 0x74, 0x65, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f,
+    0x6e, 0x73, 0x65, 0x32, 0x58, 0x0a, 0x06, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x4e, 0x0a,
+    0x0d, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x1d,
+    0x2e, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65,
+    0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e,
+    0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x55,
+    0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x44, 0x5a,
+    0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x64, 0x67, 0x65,
+    0x6c, 0x65, 0x73, 0x73, 0x73, 0x79, 0x73, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x65, 0x6c, 0x6c,
+    0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x76, 0x32, 0x2f, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65,
+    0x2d, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2f, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x70, 0x72,
+    0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
 }

 var (
@@ -11,8 +11,8 @@ service Update {
 }

 message ExecuteUpdateRequest {
-    string kubeadm_url = 1;
-    string kubeadm_hash = 2;
+    reserved 1, 2;
+    reserved "kubeadm_url", "kubeadm_hash";
     string wanted_kubernetes_version = 3;

     repeated components.Component kubernetes_components = 4;