Mirror of https://github.com/edgelesssys/constellation.git (synced 2025-01-11 07:29:29 -05:00)
constellation-access-manager: Persistent SSH as ConfigMap (#184)
This commit is contained in:
parent 1e19e64fbc, commit f0b8412ef8
.github/workflows/build-access-manager-image.yml (new file, 54 lines, vendored)
@@ -0,0 +1,54 @@
name: Build and Upload access-manager image
env:
  REGISTRY: ghcr.io
  IMAGE_NAME: access-manager

on:
  workflow_dispatch:
  push:
    branches:
      - main
    paths:
      - "access_manager/**"
      - "internal/deploy/**"

jobs:
  build:
    runs-on: ubuntu-latest
    permissions:
      contents: read
      packages: write
    steps:
      - uses: actions/checkout@v1

      - name: Docker meta
        id: meta
        uses: docker/metadata-action@v3
        with:
          images: |
            ${{ env.REGISTRY }}/${{ github.repository }}/${{ env.IMAGE_NAME }}
          tags: |
            type=raw,value=latest,enable={{is_default_branch}}
            type=ref,event=branch
      - name: Set up Docker Buildx
        id: docker-setup
        uses: docker/setup-buildx-action@v1

      - name: Log in to the Container registry
        id: docker-login
        uses: docker/login-action@v1
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Build and push constellation-access-manager container image
        id: build-constellation-access-manager
        uses: docker/build-push-action@v2
        with:
          context: .
          file: access_manager/Dockerfile
          target: release
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
@@ -7,8 +7,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

## [Unreleased]
### Added
- Early boot logging for Cloud Provider: GCP & Azure
- Added `constellation-access-manager`, allowing users to manage SSH users over a ConfigMap. This allows persistent & dynamic management of SSH users on multiple nodes, even after a reboot.

### Changed
- Moved KMS image build instructions to `Dockerfile.services` to have a centralized Dockerfile for all in-repo microservices.

### Removed
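The ConfigMap-driven flow described in this changelog entry can be sketched as follows. This is a hypothetical illustration, not part of the commit: only the `ssh-users` name and `kube-system` namespace are taken from the code further below; the helper name, kubeconfig loading, and example key are assumptions.

package main

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	k8serrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// addSSHUser upserts a username/public-key pair into the ssh-users ConfigMap
// that the access-manager reads on each node.
func addSSHUser(ctx context.Context, client kubernetes.Interface, username, publicKey string) error {
	cms := client.CoreV1().ConfigMaps("kube-system")
	cm, err := cms.Get(ctx, "ssh-users", metav1.GetOptions{})
	if k8serrors.IsNotFound(err) {
		// ConfigMap does not exist yet: create it with the single entry.
		cm = &v1.ConfigMap{
			ObjectMeta: metav1.ObjectMeta{Name: "ssh-users", Namespace: "kube-system"},
			Data:       map[string]string{username: publicKey},
		}
		_, err = cms.Create(ctx, cm, metav1.CreateOptions{})
		return err
	}
	if err != nil {
		return err
	}
	if cm.Data == nil {
		cm.Data = map[string]string{}
	}
	cm.Data[username] = publicKey
	_, err = cms.Update(ctx, cm, metav1.UpdateOptions{})
	return err
}

func main() {
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(config)
	if err := addSSHUser(context.Background(), client, "myuser", "ssh-ed25519 AAAA... example"); err != nil {
		panic(err)
	}
	fmt.Println("updated ssh-users ConfigMap")
}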
@@ -4,6 +4,7 @@ This is the main repository of Constellation.

Core components:

* [access_manager](access_manager): Contains the access-manager pod used to persist SSH users based on a K8s ConfigMap
* [cli](cli): The CLI is used to manage a Constellation cluster
* [coordinator](coordinator): The Coordinator is a node agent whose most important task is to bootstrap a node
* [image](image): Build files for the Constellation disk image
access_manager/Dockerfile (new file, 31 lines)
@@ -0,0 +1,31 @@
FROM fedora@sha256:36af84ba69e21c9ef86a0424a090674c433b2b80c2462e57503886f1d823abe8 as build

RUN dnf -y update && \
    dnf -y install @development-tools pkg-config iproute iputils wget git jq openssl-devel cryptsetup-libs cryptsetup-devel && \
    dnf clean all

# Install Go
ARG GO_VER=1.18
RUN wget https://go.dev/dl/go${GO_VER}.linux-amd64.tar.gz && \
    tar -C /usr/local -xzf go${GO_VER}.linux-amd64.tar.gz && \
    rm go${GO_VER}.linux-amd64.tar.gz
ENV PATH ${PATH}:/usr/local/go/bin

# Download go dependencies
WORKDIR /constellation/
COPY go.mod ./
COPY go.sum ./
RUN go mod download all

# Copy Repo
COPY . /constellation
RUN rm -rf ./hack/

# Build the access_manager
WORKDIR /constellation/access_manager/
RUN CGO_ENABLED=0 go build -o /constellation/build/access_manager -ldflags "-s -w" .

# Copy the access_manager from build into a scratch container, which is eventually deployed into the cluster
FROM scratch as release
COPY --from=build /constellation/build/access_manager /access_manager
ENTRYPOINT [ "/access_manager" ]
access_manager/access_manager.go (new file, 297 lines)
@@ -0,0 +1,297 @@
package main

import (
	"context"
	"fmt"
	"log"
	"os"
	"path"
	"path/filepath"
	"syscall"
	"time"

	"github.com/edgelesssys/constellation/internal/deploy/ssh"
	"github.com/edgelesssys/constellation/internal/deploy/user"
	"github.com/spf13/afero"

	v1 "k8s.io/api/core/v1"
	v1Options "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

const (
	// hostPath holds the path to the host's root file system we chroot into.
	hostPath = "/host"

	// normalHomePath holds the general home directory of a system.
	normalHomePath = "/var/home"

	// evictedHomePath holds the directory to which deleted user directories are moved.
	evictedHomePath = "/var/evicted"

	// relativePathToSSHKeys holds the path inside a user's directory to the SSH keys.
	// Needs to be in sync with internal/deploy/ssh.go.
	relativePathToSSHKeys = ".ssh/authorized_keys.d/constellation-ssh-keys"

	// timeout is the maximum time to wait for communication with the Kubernetes API server.
	timeout = 60 * time.Second
)

// uidGIDPair holds the user owner and group owner of a directory.
type uidGIDPair struct {
	UID uint32
	GID uint32
}

func main() {
	hostname, err := os.Hostname()
	if err != nil {
		log.Println("Starting constellation-access-manager as unknown pod.")
	} else {
		log.Println("Starting constellation-access-manager as", hostname)
	}

	// Retrieve configMap from Kubernetes API before we chroot into the host filesystem.
	configMap, err := retrieveConfigMap()
	if err != nil {
		log.Panicf("Failed to retrieve ConfigMap from Kubernetes API: %v", err)
	}

	// Chroot into main system
	if err := syscall.Chroot(hostPath); err != nil {
		log.Panicf("Failed to chroot into host filesystem: %v", err)
	}
	if err := syscall.Chdir("/"); err != nil {
		log.Panicf("Failed to chdir into host filesystem: %v", err)
	}

	fs := afero.NewOsFs()
	linuxUserManager := user.NewLinuxUserManager(fs)

	if err := run(fs, linuxUserManager, configMap); err != nil {
		// So far there is only one error path in this code, and this is getting the user directories... So just make the error specific here for now.
		log.Panicf("Failed to retrieve existing user directories: %v", err)
	}
}

// loadClientSet loads the Kubernetes API client.
func loadClientSet() (*kubernetes.Clientset, error) {
	// creates the in-cluster config
	config, err := rest.InClusterConfig()
	if err != nil {
		return nil, err
	}
	// creates the clientset
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		return nil, err
	}

	return clientset, nil
}

// deployKeys creates or evicts users based on the ConfigMap and deploys their SSH keys.
func deployKeys(ctx context.Context, configMap *v1.ConfigMap, fs afero.Fs, linuxUserManager user.LinuxUserManager, userMap map[string]uidGIDPair, sshAccess *ssh.Access) {
	// If no ConfigMap exists or has been emptied, evict all users and exit.
	if configMap == nil || len(configMap.Data) == 0 {
		for username, ownership := range userMap {
			if username != "root" {
				evictedUserPath := path.Join(evictedHomePath, username)
				log.Printf("Evicting '%s' with previous UID '%d' and GID '%d' to %s.\n", username, ownership.UID, ownership.GID, evictedUserPath)
				if err := evictUser(username, fs, linuxUserManager); err != nil {
					log.Printf("Did not evict '%s': %v\n", username, err)
					continue
				}
			} else {
				// Remove root's SSH key specifically instead of evicting the whole directory.
				if err := evictRootKey(fs, linuxUserManager); err != nil && !os.IsNotExist(err) {
					log.Printf("Failed to remove previously existing root key: %v\n", err)
					continue
				}
			}
		}

		return
	}

	// First, recreate users that already existed, if they are defined in the configMap.
	// For users which do not exist, we move their user directories to avoid accidental takeovers but also loss of data.
	for username, ownership := range userMap {
		if username != "root" {
			if _, ok := configMap.Data[username]; ok {
				log.Printf("Recreating '%s' with UID %d and GID %d, if not existent.\n", username, ownership.UID, ownership.GID)
				if err := linuxUserManager.Creator.CreateUserWithSpecificUIDAndGID(ctx, username, int(ownership.UID), int(ownership.GID)); err != nil {
					log.Printf("Did not recreate '%s': %v\n", username, err)
					continue
				}
			} else {
				evictedUserPath := path.Join(evictedHomePath, username)
				log.Printf("Evicting '%s' with previous UID '%d' and GID '%d' to %s.\n", username, ownership.UID, ownership.GID, evictedUserPath)
				if err := evictUser(username, fs, linuxUserManager); err != nil {
					log.Printf("Did not evict '%s': %v\n", username, err)
					continue
				}
			}
		} else {
			// Always remove the root key first, even if it is about to be redeployed.
			if err := evictRootKey(fs, linuxUserManager); err != nil && !os.IsNotExist(err) {
				log.Printf("Failed to remove previously existing root key: %v\n", err)
				continue
			}
		}
	}

	// Then, create the remaining users from the configMap (if remaining) and deploy SSH keys for all users.
	for username, publicKey := range configMap.Data {
		if _, ok := userMap[username]; !ok {
			log.Printf("Creating user '%s'\n", username)
			if err := linuxUserManager.Creator.CreateUser(ctx, username); err != nil {
				log.Printf("Failed to create '%s': %v\n", username, err)
				continue
			}
		}

		// If we created a user, let's actually get the home directory instead of assuming it's the same as the normal home directory.
		user, err := linuxUserManager.GetLinuxUser(username)
		if err != nil {
			log.Printf("Failed to retrieve information about user '%s': %v\n", username, err)
			continue
		}

		// Delete already deployed keys
		pathToSSHKeys := filepath.Join(user.Home, relativePathToSSHKeys)
		if err := fs.Remove(pathToSSHKeys); err != nil && !os.IsNotExist(err) {
			log.Printf("Failed to delete remaining managed SSH keys for '%s': %v\n", username, err)
			continue
		}

		// And (re)deploy the keys from the ConfigMap
		newKey := ssh.UserKey{
			Username:  username,
			PublicKey: publicKey,
		}

		log.Printf("Deploying new SSH key for '%s'.\n", username)
		if err := sshAccess.DeployAuthorizedKey(context.Background(), newKey); err != nil {
			log.Printf("Failed to deploy SSH keys for '%s': %v\n", username, err)
			continue
		}
	}
}

// evictUser moves a user directory to evictedHomePath and recursively changes its owner to root.
func evictUser(username string, fs afero.Fs, linuxUserManager user.LinuxUserManager) error {
	if _, err := linuxUserManager.GetLinuxUser(username); err == nil {
		return fmt.Errorf("user '%s' still seems to exist", username)
	}

	// First, ensure evictedHomePath already exists.
	if err := fs.MkdirAll(evictedHomePath, 0o700); err != nil {
		return err
	}

	// Build paths to the user's home directory and evicted home directory, which includes a timestamp to avoid collisions.
	oldUserDir := path.Join(normalHomePath, username)
	evictedUserDir := path.Join(evictedHomePath, fmt.Sprintf("%s_%d", username, time.Now().Unix()))

	// Move the old, not recreated user directory to evictedHomePath.
	if err := fs.Rename(oldUserDir, evictedUserDir); err != nil {
		return err
	}

	// Chown the user directory and all files inside to root, but do not change permissions to allow recovery without messed up permissions.
	if err := fs.Chown(evictedUserDir, 0, 0); err != nil {
		return err
	}
	if err := afero.Walk(fs, evictedUserDir, func(name string, info os.FileInfo, err error) error {
		if err == nil {
			err = fs.Chown(name, 0, 0)
		}

		return err
	}); err != nil {
		return err
	}

	return nil
}

// evictRootKey removes the root key from the filesystem, instead of evicting the whole user directory.
func evictRootKey(fs afero.Fs, linuxUserManager user.LinuxUserManager) error {
	log.Println("Removing any old keys for 'root', if existent.")
	user, err := linuxUserManager.GetLinuxUser("root")
	if err != nil {
		return err
	}

	// Delete already deployed keys
	pathToSSHKeys := filepath.Join(user.Home, relativePathToSSHKeys)
	if err := fs.Remove(pathToSSHKeys); err != nil && !os.IsNotExist(err) {
		return err
	}

	return nil
}

// retrieveConfigMap contacts the Kubernetes API server and retrieves the ssh-users ConfigMap.
func retrieveConfigMap() (*v1.ConfigMap, error) {
	// Authenticate with the Kubernetes API and get the information from the ssh-users ConfigMap to recreate the users we need.
	log.Println("Authenticating with Kubernetes...")
	clientset, err := loadClientSet()
	if err != nil {
		return nil, err
	}

	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()

	log.Println("Requesting 'ssh-users' ConfigMap...")
	configmap, err := clientset.CoreV1().ConfigMaps("kube-system").Get(ctx, "ssh-users", v1Options.GetOptions{})
	if err != nil {
		return nil, err
	}

	return configmap, err
}

// generateUserMap iterates the list of existing home directories to create a map of previously existing usernames to their previous respective UID and GID.
func generateUserMap(fs afero.Fs) (map[string]uidGIDPair, error) {
	// Go through the normalHomePath directory, and create a mapping of existing user names in combination with their owner's UID & GID.
	// We use this information later to create missing users under the same UID and GID to avoid breakage.
	fileInfo, err := afero.ReadDir(fs, normalHomePath)
	if err != nil {
		return nil, err
	}

	userMap := make(map[string]uidGIDPair)
	userMap["root"] = uidGIDPair{UID: 0, GID: 0}
	// This will fail under MemMapFS, since it's not UNIX-compatible.
	for _, singleInfo := range fileInfo {
		// Fail gracefully instead of hard.
		if stat, ok := singleInfo.Sys().(*syscall.Stat_t); ok {
			userMap[singleInfo.Name()] = uidGIDPair{UID: stat.Uid, GID: stat.Gid}
			log.Printf("Found home directory for '%s' (%d:%d).\n", singleInfo.Name(), stat.Uid, stat.Gid)
		} else {
			log.Printf("WARNING: Failed to retrieve UNIX stat for %s. User will not be evicted, or if this directory belongs to a user that is to be created later, it might be created under a different UID/GID than before.\n", singleInfo.Name())
			continue
		}
	}

	return userMap, nil
}

func run(fs afero.Fs, linuxUserManager user.LinuxUserManager, configMap *v1.ConfigMap) error {
	sshAccess := ssh.NewAccess(linuxUserManager)

	// Generate userMap containing existing user directories and their ownership
	userMap, err := generateUserMap(fs)
	if err != nil {
		return err
	}

	// Try to deploy keys based on configmap.
	deployKeys(context.Background(), configMap, fs, linuxUserManager, userMap, sshAccess)

	return nil
}
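A note on loadClientSet above: it only supports the in-cluster path via rest.InClusterConfig. A minimal sketch of a kubeconfig fallback for running the binary outside a cluster (purely an assumption, not part of this commit) could look like this:

package main

import (
	"os"

	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
)

// loadConfig prefers the in-cluster service account (what the DaemonSet uses)
// and falls back to the kubeconfig named by KUBECONFIG for local runs.
func loadConfig() (*rest.Config, error) {
	if config, err := rest.InClusterConfig(); err == nil {
		return config, nil
	}
	return clientcmd.BuildConfigFromFlags("", os.Getenv("KUBECONFIG"))
}

func main() {
	// Smoke test: fail loudly if neither config source is available.
	if _, err := loadConfig(); err != nil {
		panic(err)
	}
}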
access_manager/access_manager_test.go (new file, 316 lines)
@@ -0,0 +1,316 @@
package main

import (
	"context"
	"os"
	"path"
	"path/filepath"
	"strconv"
	"strings"
	"testing"

	"github.com/edgelesssys/constellation/internal/deploy/ssh"
	"github.com/edgelesssys/constellation/internal/deploy/user"
	"github.com/spf13/afero"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	v1 "k8s.io/api/core/v1"
)

func TestEvictUser(t *testing.T) {
	require := require.New(t)
	assert := assert.New(t)

	fs := afero.NewMemMapFs()
	linuxUserManager := user.NewLinuxUserManagerFake(fs)

	// Create fake user directory
	homePath := path.Join(normalHomePath, "myuser")
	err := fs.MkdirAll(homePath, 0o700)
	require.NoError(err)

	// Try to evict the user
	assert.NoError(evictUser("myuser", fs, linuxUserManager))

	// Check if user has been evicted
	homeEntries, err := afero.ReadDir(fs, normalHomePath)
	require.NoError(err)
	evictedEntries, err := afero.ReadDir(fs, evictedHomePath)
	require.NoError(err)
	assert.Len(homeEntries, 0)
	assert.Len(evictedEntries, 1)
	for _, singleEntry := range evictedEntries {
		assert.Contains(singleEntry.Name(), "myuser")
	}

	/*
		Note: Unfortunately, due to a bug in afero, we cannot test that the files inside the directory have actually been moved.
		This works on the real filesystem, but not on the memory filesystem.
		See: https://github.com/spf13/afero/issues/141 (known since 2017, guess it will never get fixed ¯\_(ツ)_/¯)
		This limits the scope of this test, obviously... But I think as long as we can move the directory,
		the functionality on the real filesystem should be there (unless it throws an error).
	*/
}

func TestDeployKeys(t *testing.T) {
	require := require.New(t)
	assert := assert.New(t)

	testCases := map[string]struct {
		configMap     *v1.ConfigMap
		existingUsers map[string]uidGIDPair
	}{
		"undefined":                   {},
		"undefined map, empty users":  {existingUsers: map[string]uidGIDPair{}},
		"empty map, undefined users":  {configMap: &v1.ConfigMap{}},
		"both empty": {
			configMap: &v1.ConfigMap{
				Data: map[string]string{},
			},
			existingUsers: map[string]uidGIDPair{},
		},
		"create two users, no existing users": {
			configMap: &v1.ConfigMap{
				Data: map[string]string{
					"user1": "ssh-rsa abcdefgh",
					"user2": "ssh-ed25519 defghijklm",
				},
			},
			existingUsers: map[string]uidGIDPair{},
		},
		"empty configMap, user1 and user2 should be evicted": {
			configMap: &v1.ConfigMap{
				Data: map[string]string{},
			},
			existingUsers: map[string]uidGIDPair{
				"user1": {
					UID: 1000,
					GID: 1000,
				},
				"user2": {
					UID: 1001,
					GID: 1001,
				},
			},
		},
		"configMap contains user2, user1 should be evicted, user2 recreated": {
			configMap: &v1.ConfigMap{
				Data: map[string]string{
					"user2": "ssh-rsa abcdefg",
				},
			},
			existingUsers: map[string]uidGIDPair{
				"user1": {
					UID: 1000,
					GID: 1000,
				},
				"user2": {
					UID: 1001,
					GID: 1001,
				},
			},
		},
		"configMap contains user1 and user3, user1 should be recreated, user2 evicted, user3 created": {
			configMap: &v1.ConfigMap{
				Data: map[string]string{
					"user1": "ssh-rsa abcdefg",
					"user3": "ssh-ed25519 defghijklm",
				},
			},
			existingUsers: map[string]uidGIDPair{
				"user1": {
					UID: 1000,
					GID: 1000,
				},
				"user2": {
					UID: 1001,
					GID: 1001,
				},
			},
		},
		"configMap contains user1 and user3, both should be recreated": {
			configMap: &v1.ConfigMap{
				Data: map[string]string{
					"user1": "ssh-rsa abcdefg",
					"user3": "ssh-ed25519 defghijklm",
				},
			},
			existingUsers: map[string]uidGIDPair{
				"user1": {
					UID: 1000,
					GID: 1000,
				},
				"user3": {
					UID: 1002,
					GID: 1002,
				},
			},
		},
		"configMap contains user2, user1 and user3 should be evicted, user2 should be created": {
			configMap: &v1.ConfigMap{
				Data: map[string]string{
					"user2": "ssh-ed25519 defghijklm",
				},
			},
			existingUsers: map[string]uidGIDPair{
				"user1": {
					UID: 1000,
					GID: 1000,
				},
				"user3": {
					UID: 1002,
					GID: 1002,
				},
			},
		},
	}
	for _, tc := range testCases {
		fs := afero.NewMemMapFs()
		require.NoError(fs.MkdirAll(normalHomePath, 0o700))
		require.NoError(fs.Mkdir("/etc", 0o644))
		_, err := fs.Create("/etc/passwd")
		require.NoError(err)

		// Create fake user directories
		for user := range tc.existingUsers {
			userHomePath := path.Join(normalHomePath, user)
			err := fs.MkdirAll(userHomePath, 0o700)
			require.NoError(err)
			require.NoError(fs.Chown(userHomePath, int(tc.existingUsers[user].UID), int(tc.existingUsers[user].GID)))
		}

		linuxUserManager := user.NewLinuxUserManagerFake(fs)
		sshAccess := ssh.NewAccess(linuxUserManager)
		deployKeys(context.Background(), tc.configMap, fs, linuxUserManager, tc.existingUsers, sshAccess)

		// Unfortunately, we cannot retrieve the UID/GID from afero's MemMapFs without weird hacks,
		// as it does not have getters and it is not exported.
		if tc.configMap != nil && tc.existingUsers != nil {
			// Parse /etc/passwd and check for users
			passwdEntries, err := linuxUserManager.Passwd.Parse(fs)
			require.NoError(err)

			// Check recreation or deletion
			for user := range tc.existingUsers {
				if _, ok := tc.configMap.Data[user]; ok {
					checkHomeDirectory(user, fs, assert, true)

					// Check if user exists in /etc/passwd
					userEntry, ok := passwdEntries[user]
					assert.True(ok)

					// Check if user has been recreated with correct UID/GID
					actualUID, err := strconv.Atoi(userEntry.Uid)
					assert.NoError(err)
					assert.EqualValues(tc.existingUsers[user].UID, actualUID)
					actualGID, err := strconv.Atoi(userEntry.Gid)
					assert.NoError(err)
					assert.EqualValues(tc.existingUsers[user].GID, actualGID)

					// Check if the user has the right keys
					checkSSHKeys(user, fs, assert, tc.configMap.Data[user]+"\n")
				} else {
					// Check if home directory is not available anymore under the regular path
					checkHomeDirectory(user, fs, assert, false)

					// Check if home directory has been evicted
					homeDirs, err := afero.ReadDir(fs, evictedHomePath)
					require.NoError(err)

					var userDirectoryName string
					for _, singleDir := range homeDirs {
						if strings.Contains(singleDir.Name(), user+"_") {
							userDirectoryName = singleDir.Name()
							break
						}
					}
					assert.NotEmpty(userDirectoryName)

					// Check if user does not exist in /etc/passwd
					_, ok := passwdEntries[user]
					assert.False(ok)
				}
			}

			// Check creation of new users
			for user := range tc.configMap.Data {
				// We already checked recreated or evicted users, so skip them.
				if _, ok := tc.existingUsers[user]; ok {
					continue
				}

				checkHomeDirectory(user, fs, assert, true)
				checkSSHKeys(user, fs, assert, tc.configMap.Data[user]+"\n")
			}
		}
	}
}

func TestEvictRootKey(t *testing.T) {
	assert := assert.New(t)
	require := require.New(t)
	fs := afero.NewMemMapFs()

	// Create /etc/passwd with root entry
	require.NoError(fs.Mkdir("/etc", 0o644))
	file, err := fs.Create("/etc/passwd")
	require.NoError(err)
	passwdRootEntry := "root:x:0:0:root:/root:/bin/bash\n"
	n, err := file.WriteString(passwdRootEntry)
	require.NoError(err)
	require.Equal(len(passwdRootEntry), n)

	// Deploy a fake key for root
	require.NoError(fs.MkdirAll("/root/.ssh/authorized_keys.d", 0o700))
	file, err = fs.Create(filepath.Join("/root", relativePathToSSHKeys))
	require.NoError(err)
	_, err = file.WriteString("ssh-ed25519 abcdefghijklm\n")
	require.NoError(err)

	linuxUserManager := user.NewLinuxUserManagerFake(fs)

	// Parse /etc/passwd and check for users
	passwdEntries, err := linuxUserManager.Passwd.Parse(fs)
	require.NoError(err)

	// Check if user exists in /etc/passwd
	userEntry, ok := passwdEntries["root"]
	assert.True(ok)

	// Check if user has been recreated with correct UID/GID
	actualUID, err := strconv.Atoi(userEntry.Uid)
	assert.NoError(err)
	assert.EqualValues(0, actualUID)
	actualGID, err := strconv.Atoi(userEntry.Gid)
	assert.NoError(err)
	assert.EqualValues(0, actualGID)

	// Delete the key
	assert.NoError(evictRootKey(fs, linuxUserManager))

	// Check if the key has been deleted
	_, err = fs.Stat(filepath.Join("/root", relativePathToSSHKeys))
	assert.True(os.IsNotExist(err))
}

func checkSSHKeys(user string, fs afero.Fs, assert *assert.Assertions, expectedValue string) {
	// Do the same check as above
	_, err := fs.Stat(path.Join(normalHomePath, user))
	assert.NoError(err)

	// Check if the user has the right keys
	authorizedKeys, err := afero.ReadFile(fs, filepath.Join(normalHomePath, user, relativePathToSSHKeys))
	assert.NoError(err)
	assert.EqualValues(expectedValue, string(authorizedKeys))
}

func checkHomeDirectory(user string, fs afero.Fs, assert *assert.Assertions, shouldExist bool) {
	_, err := fs.Stat(path.Join(normalHomePath, user))
	if shouldExist {
		assert.NoError(err)
	} else {
		assert.Error(err)
		assert.True(os.IsNotExist(err))
	}
}
@@ -4,6 +4,7 @@ import (
	"context"
	"time"

+	"github.com/edgelesssys/constellation/coordinator/pubapi/pubproto"
	"github.com/edgelesssys/constellation/coordinator/role"
	"github.com/edgelesssys/constellation/internal/constants"
	"go.uber.org/zap"

@@ -21,14 +22,22 @@ func (c *Core) GetK8SCertificateKey(ctx context.Context) (string, error) {
}

// InitCluster initializes the cluster, stores the join args, and returns the kubeconfig.
-func (c *Core) InitCluster(ctx context.Context, autoscalingNodeGroups []string, cloudServiceAccountURI string, masterSecret []byte) ([]byte, error) {
+func (c *Core) InitCluster(ctx context.Context, autoscalingNodeGroups []string, cloudServiceAccountURI string, masterSecret []byte, sshUsers []*pubproto.SSHUserKey) ([]byte, error) {
	c.zaplogger.Info("Initializing cluster")
	vpnIP, err := c.GetVPNIP()
	if err != nil {
		c.zaplogger.Error("Retrieving vpn ip failed", zap.Error(err))
		return nil, err
	}
-	if err := c.kube.InitCluster(ctx, autoscalingNodeGroups, cloudServiceAccountURI, vpnIP, masterSecret); err != nil {
+
+	// Convert SSH users map from protobuffer to map
+	sshUsersMap := make(map[string]string)
+	if len(sshUsers) > 0 {
+		for _, value := range sshUsers {
+			sshUsersMap[value.Username] = value.PublicKey
+		}
+	}
+	if err := c.kube.InitCluster(ctx, autoscalingNodeGroups, cloudServiceAccountURI, vpnIP, masterSecret, sshUsersMap); err != nil {
		c.zaplogger.Error("Initializing cluster failed", zap.Error(err))
		return nil, err
	}

@@ -80,7 +89,7 @@ func (c *Core) JoinCluster(ctx context.Context, args *kubeadm.BootstrapTokenDisc
// Cluster manages the overall cluster lifecycle (init, join).
type Cluster interface {
	// InitCluster bootstraps a new cluster with the current node being the master, returning the arguments required to join the cluster.
-	InitCluster(ctx context.Context, autoscalingNodeGroups []string, cloudServiceAccountURI, vpnIP string, masterSecret []byte) error
+	InitCluster(ctx context.Context, autoscalingNodeGroups []string, cloudServiceAccountURI, vpnIP string, masterSecret []byte, sshUsers map[string]string) error
	// JoinCluster will join the current node to an existing cluster.
	JoinCluster(ctx context.Context, args *kubeadm.BootstrapTokenDiscovery, nodeVPNIP, certKey string, peerRole role.Role) error
	// GetKubeconfig reads the kubeconfig from the filesystem. Only succeeds after cluster is initialized.

@@ -97,7 +106,7 @@ type Cluster interface {
type ClusterFake struct{}

// InitCluster fakes bootstrapping a new cluster with the current node being the master, returning the arguments required to join the cluster.
-func (c *ClusterFake) InitCluster(ctx context.Context, autoscalingNodeGroups []string, cloudServiceAccountURI, vpnIP string, masterSecret []byte) error {
+func (c *ClusterFake) InitCluster(ctx context.Context, autoscalingNodeGroups []string, cloudServiceAccountURI, vpnIP string, masterSecret []byte, sshUsers map[string]string) error {
	return nil
}
@@ -6,6 +6,7 @@ import (
	"testing"
	"time"

+	"github.com/edgelesssys/constellation/coordinator/pubapi/pubproto"
	"github.com/edgelesssys/constellation/coordinator/role"
	"github.com/edgelesssys/constellation/internal/attestation/simulator"
	"github.com/edgelesssys/constellation/internal/deploy/user"

@@ -22,6 +23,12 @@ func TestInitCluster(t *testing.T) {
	kubeconfigContent := []byte("kubeconfig")

+	testMS := []byte{0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8}
+	testSSHUsers := make([]*pubproto.SSHUserKey, 0)
+	testSSHUser := &pubproto.SSHUserKey{
+		Username:  "testUser",
+		PublicKey: "ssh-rsa testKey",
+	}
+	testSSHUsers = append(testSSHUsers, testSSHUser)

	testCases := map[string]struct {
		cluster Cluster

@@ -29,6 +36,7 @@ func TestInitCluster(t *testing.T) {
		metadata              ProviderMetadata
		masterSecret          []byte
		autoscalingNodeGroups []string
+		sshUsers              []*pubproto.SSHUserKey
		wantErr               bool
	}{
		"InitCluster works": {

@@ -46,7 +54,16 @@ func TestInitCluster(t *testing.T) {
			vpn:                   &stubVPN{interfaceIP: "192.0.2.1"},
			metadata:              &stubMetadata{supportedRes: true, signalRoleErr: someErr},
			autoscalingNodeGroups: []string{"someNodeGroup"},
		},
+		"InitCluster works with SSH and KMS": {
+			cluster: &clusterStub{
+				kubeconfig: kubeconfigContent,
+			},
+			vpn:                   &stubVPN{interfaceIP: "192.0.2.1"},
+			metadata:              &stubMetadata{supportedRes: true},
+			autoscalingNodeGroups: []string{"someNodeGroup"},
+			masterSecret:          testMS,
+			sshUsers:              testSSHUsers,
+		},
		"cannot get VPN IP": {
			cluster: &clusterStub{

@@ -87,7 +104,7 @@ func TestInitCluster(t *testing.T) {
			core, err := NewCore(tc.vpn, tc.cluster, tc.metadata, nil, zapLogger, simulator.OpenSimulatedTPM, nil, file.NewHandler(fs), user.NewLinuxUserManagerFake(fs))
			require.NoError(err)

-			kubeconfig, err := core.InitCluster(context.Background(), tc.autoscalingNodeGroups, "cloud-service-account-uri", tc.masterSecret)
+			kubeconfig, err := core.InitCluster(context.Background(), tc.autoscalingNodeGroups, "cloud-service-account-uri", tc.masterSecret, tc.sshUsers)

			if tc.wantErr {
				assert.Error(err)

@@ -179,7 +196,7 @@ type clusterStub struct {
	inVpnIP string
}

-func (c *clusterStub) InitCluster(ctx context.Context, autoscalingNodeGroups []string, cloudServiceAccountURI string, vpnIP string, masterSecret []byte) error {
+func (c *clusterStub) InitCluster(ctx context.Context, autoscalingNodeGroups []string, cloudServiceAccountURI string, vpnIP string, masterSecret []byte, sshUsers map[string]string) error {
	c.inAutoscalingNodeGroups = autoscalingNodeGroups
	c.inCloudServiceAccountURI = cloudServiceAccountURI
	c.inVpnIP = vpnIP
@@ -8,11 +8,11 @@ import (

// CreateSSHUsers creates UNIX users with respective SSH access on the system the coordinator is running on when defined in the config.
func (c *Core) CreateSSHUsers(sshUserKeys []ssh.UserKey) error {
-	sshAccess := ssh.NewSSHAccess(c.linuxUserManager)
+	sshAccess := ssh.NewAccess(c.linuxUserManager)
	ctx := context.Background()

	for _, pair := range sshUserKeys {
-		if err := sshAccess.DeploySSHAuthorizedKey(ctx, pair); err != nil {
+		if err := sshAccess.DeployAuthorizedKey(ctx, pair); err != nil {
			return err
		}
	}
coordinator/kubernetes/k8sapi/resources/access_manager.go (new file, 201 lines)
@@ -0,0 +1,201 @@
package resources

import (
	"github.com/edgelesssys/constellation/internal/secrets"
	"google.golang.org/protobuf/proto"
	apps "k8s.io/api/apps/v1"
	k8s "k8s.io/api/core/v1"
	rbac "k8s.io/api/rbac/v1"
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// accessManagerDeployment holds the configuration for the SSH user creation pods. User/Key definitions are stored in the ConfigMap, and the manager is deployed on each node by the DaemonSet.
type accessManagerDeployment struct {
	ConfigMap       k8s.ConfigMap
	ServiceAccount  k8s.ServiceAccount
	Role            rbac.Role
	RoleBinding     rbac.RoleBinding
	DaemonSet       apps.DaemonSet
	ImagePullSecret k8s.Secret
}

// NewAccessManagerDeployment creates a new *accessManagerDeployment which manages the SSH users for the cluster.
func NewAccessManagerDeployment(sshUsers map[string]string) *accessManagerDeployment {
	return &accessManagerDeployment{
		ServiceAccount: k8s.ServiceAccount{
			TypeMeta: v1.TypeMeta{
				APIVersion: "v1",
				Kind:       "ServiceAccount",
			},
			ObjectMeta: v1.ObjectMeta{
				Labels: map[string]string{
					"app.kubernetes.io/instance":   "constellation",
					"app.kubernetes.io/name":       "constellation-access-manager",
					"app.kubernetes.io/managed-by": "Constellation",
				},
				Name:      "constellation-access-manager",
				Namespace: "kube-system",
			},
			AutomountServiceAccountToken: proto.Bool(true),
		},
		ConfigMap: k8s.ConfigMap{
			TypeMeta: v1.TypeMeta{
				APIVersion: "v1",
				Kind:       "ConfigMap",
			},
			ObjectMeta: v1.ObjectMeta{
				Name:      "ssh-users",
				Namespace: "kube-system",
			},
			Data: sshUsers,
		},
		DaemonSet: apps.DaemonSet{
			TypeMeta: v1.TypeMeta{
				APIVersion: "apps/v1",
				Kind:       "DaemonSet",
			},
			ObjectMeta: v1.ObjectMeta{
				Name:      "constellation-access-manager",
				Namespace: "kube-system",
				Labels: map[string]string{
					"app.kubernetes.io/instance": "constellation",
					"app.kubernetes.io/name":     "constellation-access-manager",
				},
			},
			Spec: apps.DaemonSetSpec{
				Selector: &v1.LabelSelector{
					MatchLabels: map[string]string{
						"app.kubernetes.io/instance": "constellation",
						"app.kubernetes.io/name":     "constellation-access-manager",
					},
				},
				Template: k8s.PodTemplateSpec{
					ObjectMeta: v1.ObjectMeta{
						Labels: map[string]string{
							"app.kubernetes.io/instance": "constellation",
							"app.kubernetes.io/name":     "constellation-access-manager",
						},
					},
					Spec: k8s.PodSpec{
						Tolerations: []k8s.Toleration{
							{
								Key:      "node-role.kubernetes.io/master",
								Operator: k8s.TolerationOpExists,
								Effect:   k8s.TaintEffectNoSchedule,
							},
							{
								Key:      "node-role.kubernetes.io/control-plane",
								Operator: k8s.TolerationOpExists,
								Effect:   k8s.TaintEffectNoSchedule,
							},
						},
						ImagePullSecrets: []k8s.LocalObjectReference{
							{
								Name: secrets.PullSecretName,
							},
						},
						Containers: []k8s.Container{
							{
								Name:            "pause",
								Image:           "gcr.io/google_containers/pause",
								ImagePullPolicy: k8s.PullIfNotPresent,
							},
						},
						InitContainers: []k8s.Container{
							{
								Name:  "constellation-access-manager",
								Image: "ghcr.io/edgelesssys/constellation/access-manager:v1.2",
								VolumeMounts: []k8s.VolumeMount{
									{
										Name:      "host",
										MountPath: "/host",
									},
								},
								SecurityContext: &k8s.SecurityContext{
									Capabilities: &k8s.Capabilities{
										Add: []k8s.Capability{
											"SYS_CHROOT",
										},
									},
								},
							},
						},
						ServiceAccountName: "constellation-access-manager",
						Volumes: []k8s.Volume{
							{
								Name: "host",
								VolumeSource: k8s.VolumeSource{
									HostPath: &k8s.HostPathVolumeSource{
										Path: "/",
									},
								},
							},
						},
					},
				},
			},
		},
		Role: rbac.Role{
			TypeMeta: v1.TypeMeta{
				APIVersion: "rbac.authorization.k8s.io/v1",
				Kind:       "Role",
			},
			ObjectMeta: v1.ObjectMeta{
				Labels: map[string]string{
					"app.kubernetes.io/instance":   "constellation",
					"app.kubernetes.io/name":       "constellation-access-manager",
					"app.kubernetes.io/managed-by": "Constellation",
				},
				Name:      "constellation-access-manager",
				Namespace: "kube-system",
			},
			Rules: []rbac.PolicyRule{
				{
					APIGroups: []string{""},
					Resources: []string{
						"configmaps",
					},
					ResourceNames: []string{
						"ssh-users",
					},
					Verbs: []string{
						"get",
					},
				},
			},
		},
		RoleBinding: rbac.RoleBinding{
			TypeMeta: v1.TypeMeta{
				APIVersion: "rbac.authorization.k8s.io/v1",
				Kind:       "RoleBinding",
			},
			ObjectMeta: v1.ObjectMeta{
				Labels: map[string]string{
					"app.kubernetes.io/instance":   "constellation",
					"app.kubernetes.io/name":       "constellation-access-manager",
					"app.kubernetes.io/managed-by": "Constellation",
				},
				Name:      "constellation-access-manager",
				Namespace: "kube-system",
			},
			RoleRef: rbac.RoleRef{
				APIGroup: "rbac.authorization.k8s.io",
				Kind:     "Role",
				Name:     "constellation-access-manager",
			},
			Subjects: []rbac.Subject{
				{
					Kind:      "ServiceAccount",
					Name:      "constellation-access-manager",
					Namespace: "kube-system",
				},
			},
		},
		ImagePullSecret: NewImagePullSecret(),
	}
}

// Marshal marshals the access-manager deployment as YAML documents.
func (c *accessManagerDeployment) Marshal() ([]byte, error) {
	return MarshalK8SResources(c)
}
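For context, a minimal sketch of how these resources could be rendered outside the coordinator. The import path and constructor come from this commit; the standalone program around them and the example user are assumptions:

package main

import (
	"fmt"

	"github.com/edgelesssys/constellation/coordinator/kubernetes/k8sapi/resources"
)

func main() {
	// Build the access-manager resources for one hypothetical user and print
	// the marshaled YAML documents that would be applied to the cluster.
	deployment := resources.NewAccessManagerDeployment(map[string]string{
		"myuser": "ssh-ed25519 AAAA... example",
	})
	yamlDocs, err := deployment.Marshal()
	if err != nil {
		panic(err)
	}
	fmt.Println(string(yamlDocs))
}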
@@ -0,0 +1,32 @@
package resources

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestAccessManagerMarshalUnmarshal(t *testing.T) {
	require := require.New(t)
	assert := assert.New(t)

	// Without data
	accessManagerDeplNil := NewAccessManagerDeployment(nil)
	data, err := accessManagerDeplNil.Marshal()
	require.NoError(err)

	var recreated accessManagerDeployment
	require.NoError(UnmarshalK8SResources(data, &recreated))
	assert.Equal(accessManagerDeplNil, &recreated)

	// With data
	sshUsers := make(map[string]string)
	sshUsers["test-user"] = "ssh-rsa abcdefg"
	accessManagerDeplNil = NewAccessManagerDeployment(sshUsers)
	data, err = accessManagerDeplNil.Marshal()
	require.NoError(err)

	require.NoError(UnmarshalK8SResources(data, &recreated))
	assert.Equal(accessManagerDeplNil, &recreated)
}
@@ -32,6 +32,22 @@ type Client interface {
	// TODO: add tolerations
}

type ClusterUtil interface {
	InstallComponents(ctx context.Context, version string) error
	InitCluster(initConfig []byte) error
	JoinCluster(joinConfig []byte) error
	SetupPodNetwork(kubectl Client, podNetworkConfiguration resources.Marshaler) error
+	SetupAccessManager(kubectl Client, accessManagerConfiguration resources.Marshaler) error
	SetupAutoscaling(kubectl Client, clusterAutoscalerConfiguration resources.Marshaler, secrets resources.Marshaler) error
	SetupCloudControllerManager(kubectl Client, cloudControllerManagerConfiguration resources.Marshaler, configMaps resources.Marshaler, secrets resources.Marshaler) error
	SetupCloudNodeManager(kubectl Client, cloudNodeManagerConfiguration resources.Marshaler) error
	SetupKMS(kubectl Client, kmsConfiguration resources.Marshaler) error
	StartKubelet() error
	RestartKubelet() error
	GetControlPlaneJoinCertificateKey() (string, error)
	CreateJoinToken(ttl time.Duration) (*kubeadm.BootstrapTokenDiscovery, error)
}

// KubernetesUtil provides low level management of the kubernetes cluster.
type KubernetesUtil struct {
	inst installer

@@ -197,6 +213,11 @@ func (k *KubernetesUtil) SetupCloudNodeManager(kubectl Client, cloudNodeManagerC
	return kubectl.Apply(cloudNodeManagerConfiguration, true)
}

+// SetupAccessManager deploys the constellation-access-manager for deploying SSH keys on control-plane & worker nodes.
+func (k *KubernetesUtil) SetupAccessManager(kubectl Client, accessManagerConfiguration resources.Marshaler) error {
+	return kubectl.Apply(accessManagerConfiguration, true)
+}
+
// JoinCluster joins existing Kubernetes cluster using kubeadm join.
func (k *KubernetesUtil) JoinCluster(ctx context.Context, joinConfig []byte) error {
	// TODO: audit policy should be user input
@@ -14,6 +14,7 @@ type clusterUtil interface {
	InitCluster(ctx context.Context, initConfig []byte) error
	JoinCluster(ctx context.Context, joinConfig []byte) error
	SetupPodNetwork(context.Context, k8sapi.SetupPodNetworkInput) error
+	SetupAccessManager(kubectl k8sapi.Client, sshUsers resources.Marshaler) error
	SetupAutoscaling(kubectl k8sapi.Client, clusterAutoscalerConfiguration resources.Marshaler, secrets resources.Marshaler) error
	SetupCloudControllerManager(kubectl k8sapi.Client, cloudControllerManagerConfiguration resources.Marshaler, configMaps resources.Marshaler, secrets resources.Marshaler) error
	SetupCloudNodeManager(kubectl k8sapi.Client, cloudNodeManagerConfiguration resources.Marshaler) error

@@ -56,7 +56,7 @@ func New(cloudProvider string, clusterUtil clusterUtil, configProvider configura
}

// InitCluster initializes a new Kubernetes cluster and applies pod network provider.
-func (k *KubeWrapper) InitCluster(ctx context.Context, autoscalingNodeGroups []string, cloudServiceAccountURI, vpnIP string, masterSecret []byte) error {
+func (k *KubeWrapper) InitCluster(ctx context.Context, autoscalingNodeGroups []string, cloudServiceAccountURI, vpnIP string, masterSecret []byte, sshUsers map[string]string) error {
	// TODO: k8s version should be user input
	if err := k.clusterUtil.InstallComponents(context.TODO(), "1.23.6"); err != nil {
		return err

@@ -152,6 +152,11 @@ func (k *KubeWrapper) InitCluster(ctx context.Context, autoscalingNodeGroups []s
		return fmt.Errorf("setting up cluster autoscaler failed: %w", err)
	}

+	accessManager := resources.NewAccessManagerDeployment(sshUsers)
+	if err := k.clusterUtil.SetupAccessManager(k.client, accessManager); err != nil {
+		return fmt.Errorf("failed to setup access-manager: %w", err)
+	}
+
	return nil
}
@@ -244,7 +244,7 @@ func TestInitCluster(t *testing.T) {
				client:           &tc.kubeCTL,
				kubeconfigReader: tc.kubeconfigReader,
			}
-			err := kube.InitCluster(context.Background(), autoscalingNodeGroups, serviceAccountUri, coordinatorVPNIP, masterSecret)
+			err := kube.InitCluster(context.Background(), autoscalingNodeGroups, serviceAccountUri, coordinatorVPNIP, masterSecret, nil)

			if tc.wantErr {
				assert.Error(err)

@@ -501,6 +501,7 @@ type stubClusterUtil struct {
	setupCloudControllerManagerError error
	setupCloudNodeManagerError       error
	setupKMSError                    error
+	setupAccessManagerError          error
	joinClusterErr                   error
	startKubeletErr                  error
	restartKubeletErr                error

@@ -536,6 +537,10 @@ func (s *stubClusterUtil) SetupKMS(kubectl k8sapi.Client, kmsDeployment resource
	return s.setupKMSError
}

+func (s *stubClusterUtil) SetupAccessManager(kubectl k8sapi.Client, accessManagerConfiguration resources.Marshaler) error {
+	return s.setupAccessManagerError
+}
+
func (s *stubClusterUtil) SetupCloudNodeManager(kubectl k8sapi.Client, cloudNodeManagerConfiguration resources.Marshaler) error {
	return s.setupCloudNodeManagerError
}
@@ -101,7 +101,7 @@ func (a *API) ActivateAsCoordinator(in *pubproto.ActivateAsCoordinatorRequest, s
	}

	logToCLI("Initializing Kubernetes ...")
-	kubeconfig, err := a.core.InitCluster(context.TODO(), in.AutoscalingNodeGroups, in.CloudServiceAccountUri, in.MasterSecret)
+	kubeconfig, err := a.core.InitCluster(context.TODO(), in.AutoscalingNodeGroups, in.CloudServiceAccountUri, in.MasterSecret, in.SshUserKeys)
	if err != nil {
		return status.Errorf(codes.Internal, "initializing Kubernetes cluster failed: %v", err)
	}
@@ -219,22 +219,22 @@ func TestActivateAsCoordinator(t *testing.T) {
			assert.Equal([]role.Role{role.Coordinator}, core.persistNodeStateRoles)

			// Test SSH user & key creation. Both cases: "supposed to add" and "not supposed to add"
-			// This slightly differs from a real environment (e.g. missing /home) but should be fine in the stub context with a virtual file system
+			// This slightly differs from a real environment (e.g. missing /var/home) but should be fine in the stub context with a virtual file system
			if tc.sshKeys != nil {
				passwd := user.Passwd{}
				entries, err := passwd.Parse(fs)
				require.NoError(err)
				for _, singleEntry := range entries {
					username := singleEntry.Gecos
-					_, err := fs.Stat(fmt.Sprintf("/home/%s/.ssh/authorized_keys.d/ssh-keys", username))
+					_, err := fs.Stat(fmt.Sprintf("/var/home/%s/.ssh/authorized_keys.d/constellation-ssh-keys", username))
					assert.NoError(err)
				}
			} else {
				passwd := user.Passwd{}
				_, err := passwd.Parse(fs)
				assert.EqualError(err, "open /etc/passwd: file does not exist")
-				_, err = fs.Stat("/home")
-				assert.EqualError(err, "open /home: file does not exist")
+				_, err = fs.Stat("/var/home")
+				assert.EqualError(err, "open /var/home: file does not exist")
			}
		})
	}
@@ -4,6 +4,7 @@ import (
	"context"

	"github.com/edgelesssys/constellation/coordinator/peer"
+	"github.com/edgelesssys/constellation/coordinator/pubapi/pubproto"
	"github.com/edgelesssys/constellation/coordinator/role"
	"github.com/edgelesssys/constellation/coordinator/state"
	"github.com/edgelesssys/constellation/internal/deploy/ssh"

@@ -39,6 +40,6 @@ type Core interface {

	CreateSSHUsers([]ssh.UserKey) error

-	InitCluster(ctx context.Context, autoscalingNodeGroups []string, cloudServiceAccountURI string, masterSecret []byte) ([]byte, error)
+	InitCluster(ctx context.Context, autoscalingNodeGroups []string, cloudServiceAccountURI string, masterSecret []byte, sshUserKeys []*pubproto.SSHUserKey) ([]byte, error)
	JoinCluster(ctx context.Context, joinToken *kubeadm.BootstrapTokenDiscovery, certificateKey string, role role.Role) error
}
@@ -6,6 +6,7 @@ import (
	"net/netip"

	"github.com/edgelesssys/constellation/coordinator/peer"
+	"github.com/edgelesssys/constellation/coordinator/pubapi/pubproto"
	"github.com/edgelesssys/constellation/coordinator/role"
	"github.com/edgelesssys/constellation/coordinator/state"
	"github.com/edgelesssys/constellation/internal/deploy/ssh"

@@ -122,7 +123,7 @@ func (c *fakeCore) UpdatePeers(peers []peer.Peer) error {
	return c.UpdatePeersErr
}

-func (c *fakeCore) InitCluster(ctx context.Context, autoscalingNodeGroups []string, cloudServiceAccountURI string, masterSecret []byte) ([]byte, error) {
+func (c *fakeCore) InitCluster(ctx context.Context, autoscalingNodeGroups []string, cloudServiceAccountURI string, masterSecret []byte, sshUsers []*pubproto.SSHUserKey) ([]byte, error) {
	c.autoscalingNodeGroups = autoscalingNodeGroups
	return c.kubeconfig, nil
}

@@ -159,11 +160,11 @@ func (c *fakeCore) UpdateDiskPassphrase(passphrase string) error {
}

func (c *fakeCore) CreateSSHUsers(sshUserKeys []ssh.UserKey) error {
-	sshAccess := ssh.NewSSHAccess(c.linuxUserManager)
+	sshAccess := ssh.NewAccess(c.linuxUserManager)
	ctx := context.Background()

	for _, pair := range sshUserKeys {
-		if err := sshAccess.DeploySSHAuthorizedKey(ctx, pair); err != nil {
+		if err := sshAccess.DeployAuthorizedKey(ctx, pair); err != nil {
			return err
		}
	}
@@ -195,22 +195,22 @@ func TestActivateAsNode(t *testing.T) {
			assert.Equal([]role.Role{role.Node}, cor.persistNodeStateRoles)

			// Test SSH user & key creation. Both cases: "supposed to add" and "not supposed to add"
-			// This slightly differs from a real environment (e.g. missing /home) but should be fine in the stub context with a virtual file system
+			// This slightly differs from a real environment (e.g. missing /var/home) but should be fine in the stub context with a virtual file system
			if tc.sshKeys != nil {
				passwd := user.Passwd{}
				entries, err := passwd.Parse(fs)
				require.NoError(err)
				for _, singleEntry := range entries {
					username := singleEntry.Gecos
-					_, err := fs.Stat(fmt.Sprintf("/home/%s/.ssh/authorized_keys.d/ssh-keys", username))
+					_, err := fs.Stat(fmt.Sprintf("/var/home/%s/.ssh/authorized_keys.d/constellation-ssh-keys", username))
					assert.NoError(err)
				}
			} else {
				passwd := user.Passwd{}
				_, err := passwd.Parse(fs)
				assert.EqualError(err, "open /etc/passwd: file does not exist")
-				_, err = fs.Stat("/home")
-				assert.EqualError(err, "open /home: file does not exist")
+				_, err = fs.Stat("/var/home")
+				assert.EqualError(err, "open /var/home: file does not exist")
			}
		})
	}
@@ -25,7 +25,7 @@ func main() {
	fs := afero.NewOsFs()
	streamer := coordinator.NewFileStreamer(fs)
	serviceManager := deploy.NewServiceManager()
-	ssh := ssh.NewSSHAccess(user.NewLinuxUserManager(fs))
+	ssh := ssh.NewAccess(user.NewLinuxUserManager(fs))

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
@@ -124,7 +124,7 @@ func (s *Scheduler) downloadCoordinator(ctx context.Context, ips []string) (succ
// deploySSHKeys tries to deploy a list of SSH keys and logs errors encountered.
func (s *Scheduler) deploySSHKeys(ctx context.Context, keys []ssh.UserKey) {
	for _, key := range keys {
-		err := s.ssh.DeploySSHAuthorizedKey(ctx, key)
+		err := s.ssh.DeployAuthorizedKey(ctx, key)
		if err != nil {
			log.Printf("error occurred while deploying ssh key %v: %v\n", key, err)
			continue

@@ -137,5 +137,5 @@ type downloader interface {
}

type sshDeployer interface {
-	DeploySSHAuthorizedKey(ctx context.Context, sshKey ssh.UserKey) error
+	DeployAuthorizedKey(ctx context.Context, sshKey ssh.UserKey) error
}
@@ -116,7 +116,7 @@ type stubSSHDeployer struct {
	deployErr error
}

-func (s *stubSSHDeployer) DeploySSHAuthorizedKey(ctx context.Context, sshKey ssh.UserKey) error {
+func (s *stubSSHDeployer) DeployAuthorizedKey(ctx context.Context, sshKey ssh.UserKey) error {
	s.sshKeys = append(s.sshKeys, sshKey)

	return s.deployErr
@@ -37,7 +37,7 @@ func New(ssh sshDeployer, serviceManager serviceManager, streamer streamer) pb.D
func (s *debugdServer) UploadAuthorizedKeys(ctx context.Context, in *pb.UploadAuthorizedKeysRequest) (*pb.UploadAuthorizedKeysResponse, error) {
	log.Println("Uploading authorized keys")
	for _, key := range in.Keys {
-		if err := s.ssh.DeploySSHAuthorizedKey(ctx, ssh.UserKey{Username: key.Username, PublicKey: key.KeyValue}); err != nil {
+		if err := s.ssh.DeployAuthorizedKey(ctx, ssh.UserKey{Username: key.Username, PublicKey: key.KeyValue}); err != nil {
			log.Printf("Uploading authorized keys failed: %v\n", err)
			return &pb.UploadAuthorizedKeysResponse{
				Status: pb.UploadAuthorizedKeysStatus_UPLOAD_AUTHORIZED_KEYS_FAILURE,

@@ -117,7 +117,7 @@ func Start(wg *sync.WaitGroup, serv pb.DebugdServer) {
}

type sshDeployer interface {
-	DeploySSHAuthorizedKey(ctx context.Context, sshKey ssh.UserKey) error
+	DeployAuthorizedKey(ctx context.Context, sshKey ssh.UserKey) error
}

type serviceManager interface {
@@ -328,7 +328,7 @@ type stubSSHDeployer struct {
	deployErr error
}

-func (s *stubSSHDeployer) DeploySSHAuthorizedKey(ctx context.Context, sshKey ssh.UserKey) error {
+func (s *stubSSHDeployer) DeployAuthorizedKey(ctx context.Context, sshKey ssh.UserKey) error {
	s.sshKeys = append(s.sshKeys, sshKey)

	return s.deployErr
@ -16,16 +16,16 @@ type UserKey struct {
|
||||
PublicKey string
|
||||
}
|
||||
|
||||
// SSHAccess reads ssh public keys from a channel, creates the specified users if required and writes the public keys to the users authorized_keys file.
|
||||
type SSHAccess struct {
|
||||
// Access reads SSH public keys from a channel, creates the specified users if required and writes the public keys to the users authorized_keys file.
|
||||
type Access struct {
|
||||
userManager user.LinuxUserManager
|
||||
authorized map[string]bool
|
||||
mux sync.Mutex
|
||||
}
|
||||
|
||||
// NewSSHAccess creates a new SSHAccess.
|
||||
func NewSSHAccess(userManager user.LinuxUserManager) *SSHAccess {
|
||||
return &SSHAccess{
|
||||
// NewAccess creates a new Access.
|
||||
func NewAccess(userManager user.LinuxUserManager) *Access {
|
||||
return &Access{
|
||||
userManager: userManager,
|
||||
mux: sync.Mutex{},
|
||||
authorized: map[string]bool{},
|
||||
@ -33,17 +33,18 @@ func NewSSHAccess(userManager user.LinuxUserManager) *SSHAccess {
}

// alreadyAuthorized checks if key was written to authorized keys before.
func (s *SSHAccess) alreadyAuthorized(sshKey UserKey) bool {
func (s *Access) alreadyAuthorized(sshKey UserKey) bool {
_, ok := s.authorized[fmt.Sprintf("%s:%s", sshKey.Username, sshKey.PublicKey)]
return ok
}

// rememberAuthorized marks this key as already written to authorized keys.
func (s *SSHAccess) rememberAuthorized(sshKey UserKey) {
func (s *Access) rememberAuthorized(sshKey UserKey) {
s.authorized[fmt.Sprintf("%s:%s", sshKey.Username, sshKey.PublicKey)] = true
}

func (s *SSHAccess) DeploySSHAuthorizedKey(ctx context.Context, sshKey UserKey) error {
// DeployAuthorizedKey takes a user & public key pair, creates the user if required, and deploys an SSH key for them.
func (s *Access) DeployAuthorizedKey(ctx context.Context, sshKey UserKey) error {
// allow only one thread to write to authorized keys, create users and update the authorized map at a time
s.mux.Lock()
defer s.mux.Unlock()
@ -57,29 +58,29 @@ func (s *SSHAccess) DeploySSHAuthorizedKey(ctx context.Context, sshKey UserKey)
}
// CoreOS uses https://github.com/coreos/ssh-key-dir to search for ssh keys in ~/.ssh/authorized_keys.d/*
sshFolder := fmt.Sprintf("%s/.ssh", user.Home)
authorized_keys_d := fmt.Sprintf("%s/authorized_keys.d", sshFolder)
if err := s.userManager.Fs.MkdirAll(authorized_keys_d, 0o700); err != nil {
authorizedKeysD := fmt.Sprintf("%s/authorized_keys.d", sshFolder)
if err := s.userManager.Fs.MkdirAll(authorizedKeysD, 0o700); err != nil {
return err
}
if err := s.userManager.Fs.Chown(sshFolder, user.Uid, user.Gid); err != nil {
if err := s.userManager.Fs.Chown(sshFolder, user.UID, user.GID); err != nil {
return err
}
if err := s.userManager.Fs.Chown(authorized_keys_d, user.Uid, user.Gid); err != nil {
if err := s.userManager.Fs.Chown(authorizedKeysD, user.UID, user.GID); err != nil {
return err
}
authorizedKeysPath := fmt.Sprintf("%s/ssh-keys", authorized_keys_d)
authorizedKeysPath := fmt.Sprintf("%s/constellation-ssh-keys", authorizedKeysD)
authorizedKeysFile, err := s.userManager.Fs.OpenFile(authorizedKeysPath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o644)
if err != nil {
return err
}
_, err = authorizedKeysFile.WriteString(fmt.Sprintf("%s %s\n", sshKey.PublicKey, sshKey.Username))
_, err = authorizedKeysFile.WriteString(fmt.Sprintf("%s\n", sshKey.PublicKey))
if err != nil {
return err
}
if err := authorizedKeysFile.Close(); err != nil {
return err
}
if err := s.userManager.Fs.Chown(authorizedKeysPath, user.Uid, user.Gid); err != nil {
if err := s.userManager.Fs.Chown(authorizedKeysPath, user.UID, user.GID); err != nil {
return err
}
if err := s.userManager.Fs.Chmod(authorizedKeysPath, 0o644); err != nil {

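For illustration, a minimal sketch of exercising the renamed ssh.Access API against an in-memory filesystem, using the fake user manager from this diff so it runs without touching a real host; the import paths are assumptions, not taken from this diff:

package main

import (
    "context"
    "log"

    "github.com/edgelesssys/constellation/internal/deploy/ssh"  // assumed import path
    "github.com/edgelesssys/constellation/internal/deploy/user" // assumed import path
    "github.com/spf13/afero"
)

func main() {
    // The fake manager backs user creation and key files with an in-memory fs.
    userManager := user.NewLinuxUserManagerFake(afero.NewMemMapFs())
    access := ssh.NewAccess(userManager)

    // Deploy a key; a second call with the same user/key pair is skipped
    // thanks to the alreadyAuthorized/rememberAuthorized bookkeeping.
    key := ssh.UserKey{Username: "admin", PublicKey: "ssh-ed25519 AAAA... admin"}
    if err := access.DeployAuthorizedKey(context.Background(), key); err != nil {
        log.Fatal(err)
    }
}
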
@ -30,13 +30,13 @@ func TestDeploySSHAuthorizedKey(t *testing.T) {
fs: afero.NewMemMapFs(),
wantErr: false,
wantFile: true,
wantFileContents: "ssh-rsa testkey user\n",
wantFileContents: "ssh-rsa testkey\n",
},
"appending ssh key works": {
fs: memMapFsWithFile("/home/user/.ssh/authorized_keys.d/ssh-keys", "ssh-rsa preexistingkey user\n"),
fs: memMapFsWithFile("/var/home/user/.ssh/authorized_keys.d/constellation-ssh-keys", "ssh-rsa preexistingkey\n"),
wantErr: false,
wantFile: true,
wantFileContents: "ssh-rsa preexistingkey user\nssh-rsa testkey user\n",
wantFileContents: "ssh-rsa preexistingkey\nssh-rsa testkey\n",
},
"redeployment avoided": {
fs: afero.NewMemMapFs(),
@ -65,12 +65,12 @@ func TestDeploySSHAuthorizedKey(t *testing.T) {
if tc.alreadyDeployed {
authorized["user:ssh-rsa testkey"] = true
}
sshAccess := SSHAccess{
sshAccess := Access{
userManager: userManager,
mux: sync.Mutex{},
authorized: authorized,
}
err := sshAccess.DeploySSHAuthorizedKey(context.Background(), authorizedKey)
err := sshAccess.DeployAuthorizedKey(context.Background(), authorizedKey)

if tc.wantErr {
assert.Error(err)
@ -78,11 +78,11 @@ func TestDeploySSHAuthorizedKey(t *testing.T) {
}
require.NoError(err)
if tc.wantFile {
fileContents, err := afero.ReadFile(userManager.Fs, "/home/user/.ssh/authorized_keys.d/ssh-keys")
fileContents, err := afero.ReadFile(userManager.Fs, "/var/home/user/.ssh/authorized_keys.d/constellation-ssh-keys")
assert.NoError(err)
assert.Equal(tc.wantFileContents, string(fileContents))
} else {
exists, err := afero.Exists(userManager.Fs, "/home/user/.ssh/authorized_keys.d/ssh-keys")
exists, err := afero.Exists(userManager.Fs, "/var/home/user/.ssh/authorized_keys.d/constellation-ssh-keys")
assert.NoError(err)
assert.False(exists)
}

@ -4,20 +4,46 @@ import (
"context"
"fmt"
"os/exec"
"strconv"
)

// Unix defines a user creation interface for UNIX systems.
type Unix struct{}

// reference: https://man7.org/linux/man-pages/man8/useradd.8.html#EXIT_VALUES
const exitCodeUsernameAlreadyInUse = 9
const exitCodeAlreadyInUse = 9

// CreateUser creates a new user with sudo access. Returns successfully if creation succeeds or user existed already.
// CreateUser creates a new user with sudo access.
func (u Unix) CreateUser(ctx context.Context, username string) error {
cmd := exec.CommandContext(ctx, "useradd", "-m", "-G", "wheel,sudo", username)
if err := cmd.Run(); err != nil {
// do not fail if user already exists
if exitError, ok := err.(*exec.ExitError); ok && exitError.ExitCode() == exitCodeUsernameAlreadyInUse {
return nil
if exitError, ok := err.(*exec.ExitError); ok && exitError.ExitCode() == exitCodeAlreadyInUse {
return ErrUserOrGroupAlreadyExists
}
return fmt.Errorf("creating a new user failed: %w", err)
}
return nil
}

// CreateUserWithSpecificUIDAndGID creates a new user with sudo access and a specific UID and GID.
func (u Unix) CreateUserWithSpecificUIDAndGID(ctx context.Context, username string, uid int, gid int) error {
// Add group first with the targeted gid
cmd := exec.CommandContext(ctx, "groupadd", "-g", strconv.Itoa(gid), username)
if err := cmd.Run(); err != nil {
// do not fail if group already exists
if exitError, ok := err.(*exec.ExitError); ok && exitError.ExitCode() == exitCodeAlreadyInUse {
return ErrUserOrGroupAlreadyExists
}
return fmt.Errorf("creating a new group failed: %w", err)
}

// Then, create the user with both the UID and GID assigned.
cmd = exec.CommandContext(ctx, "useradd", "-m", "-G", "wheel,sudo", "-u", strconv.Itoa(uid), "-g", strconv.Itoa(gid), username)
if err := cmd.Run(); err != nil {
// do not fail if user already exists
if exitError, ok := err.(*exec.ExitError); ok && exitError.ExitCode() == exitCodeAlreadyInUse {
return ErrUserOrGroupAlreadyExists
}
return fmt.Errorf("creating a new user failed: %w", err)
}

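The new sentinel error lets callers treat "already exists" as a benign outcome while still surfacing real failures, mirroring what EnsureLinuxUserExists does further down. A caller-side sketch; the helper, the username, and the UID/GID values are hypothetical, and the import path is an assumption:

package main

import (
    "context"
    "errors"
    "fmt"

    "github.com/edgelesssys/constellation/internal/deploy/user" // assumed import path
)

// ensureServiceAccount is a hypothetical helper showing the intended error
// handling around the new ErrUserOrGroupAlreadyExists sentinel.
func ensureServiceAccount(ctx context.Context) error {
    creator := user.Unix{}
    // Username, UID and GID are arbitrary example values.
    err := creator.CreateUserWithSpecificUIDAndGID(ctx, "svc-backup", 1100, 1100)
    if errors.Is(err, user.ErrUserOrGroupAlreadyExists) {
        return nil // benign: the user or group was created earlier
    }
    return err
}

func main() {
    if err := ensureServiceAccount(context.Background()); err != nil {
        fmt.Println("creating service account failed:", err)
    }
}
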
@ -13,20 +13,24 @@ import (
// ErrUserDoesNotExist is returned by GetLinuxUser if a linux user does not exist yet.
var ErrUserDoesNotExist = errors.New("user does not exist")

// ErrUserOrGroupAlreadyExists is the Go error converted from the result of useradd or groupadd when a user or group already exists.
var ErrUserOrGroupAlreadyExists = errors.New("user or group already exists")

type passwdParser interface {
Parse(fs afero.Fs) (Entries, error)
}

type userCreator interface {
CreateUser(ctx context.Context, username string) error
CreateUserWithSpecificUIDAndGID(ctx context.Context, username string, uid int, gid int) error
}

// LinuxUser holds relevant information about a linux user (subset of /etc/passwd).
type LinuxUser struct {
Username string
Home string
Uid int
Gid int
UID int
GID int
}

// LinuxUserManager can retrieve information on linux users and create new users.
@ -58,13 +62,16 @@ func NewLinuxUserManagerFake(fs afero.Fs) LinuxUserManager {
type StubUserCreator struct {
fs afero.Fs
usernames []string
uids []int
createUserErr error
currentUID int
}

// CreateUser for StubUserCreator creates a user for a unit test environment.
func (s *StubUserCreator) CreateUser(ctx context.Context, username string) error {
if stringInSlice(username, s.usernames) {
return errors.New("username already exists")
// do not fail if user already exists
return nil
}

// We want created users to start at UID 1000
@ -78,7 +85,7 @@ func (s *StubUserCreator) CreateUser(ctx context.Context, username string) error

// If no predefined error is supposed to happen, increase the UID (unless the file system code fails)
if s.fs != nil {
lineToWrite := fmt.Sprintf("%s:x:%d:%d:%s:/home/%s:/bin/bash\n", username, s.currentUID, s.currentUID, username, username)
lineToWrite := fmt.Sprintf("%s:x:%d:%d:%s:/var/home/%s:/bin/bash\n", username, s.currentUID, s.currentUID, username, username)
file, err := s.fs.OpenFile("/etc/passwd", os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0o644)
if err != nil {
return err
@ -94,14 +101,59 @@ func (s *StubUserCreator) CreateUser(ctx context.Context, username string) error
}
}

s.currentUID += 1
s.currentUID++
s.usernames = append(s.usernames, username)

return nil
}

// getLinuxUser tries to find an existing linux user in /etc/passwd.
func (l *LinuxUserManager) getLinuxUser(username string) (LinuxUser, error) {
// CreateUserWithSpecificUIDAndGID for StubUserCreator creates a user with a specific UID and GID for a unit test environment.
func (s *StubUserCreator) CreateUserWithSpecificUIDAndGID(ctx context.Context, username string, uid int, gid int) error {
if stringInSlice(username, s.usernames) {
// do not fail if user already exists
return nil
}
if intInSlice(uid, s.uids) {
return errors.New("uid is already used by another user")
}

if s.createUserErr != nil {
return s.createUserErr
}

// If no predefined error is supposed to happen, increase the UID (unless the file system code fails)
if s.fs != nil {
lineToWrite := fmt.Sprintf("%s:x:%d:%d:%s:/var/home/%s:/bin/bash\n", username, uid, gid, username, username)
file, err := s.fs.OpenFile("/etc/passwd", os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0o644)
if err != nil {
return err
}
defer file.Close()

n, err := file.WriteString(lineToWrite)

if err != nil {
return err
} else if n != len(lineToWrite) {
return errors.New("written text too short")
}
}

// Mark UID as used (we don't track GIDs though, as multiple users can belong to one GID)
s.uids = append(s.uids, uid)

// Avoid potential collisions
if s.currentUID == uid {
s.currentUID++
}

s.usernames = append(s.usernames, username)

return nil
}

// GetLinuxUser tries to find an existing linux user in /etc/passwd.
func (l *LinuxUserManager) GetLinuxUser(username string) (LinuxUser, error) {
entries, err := l.Passwd.Parse(l.Fs)
if err != nil {
return LinuxUser{}, err
@ -121,19 +173,19 @@ func (l *LinuxUserManager) getLinuxUser(username string) (LinuxUser, error) {
return LinuxUser{
Username: username,
Home: entry.Home,
Uid: uid,
Gid: gid,
UID: uid,
GID: gid,
}, nil
}

// EnsureLinuxUserExists will try to create the user specified by username and call GetLinuxUser to retrieve user information.
func (l *LinuxUserManager) EnsureLinuxUserExists(ctx context.Context, username string) (LinuxUser, error) {
// try to create user (even if it already exists)
if err := l.Creator.CreateUser(ctx, username); err != nil {
if err := l.Creator.CreateUser(ctx, username); err != nil && !errors.Is(err, ErrUserOrGroupAlreadyExists) {
return LinuxUser{}, err
}

return l.getLinuxUser(username)
return l.GetLinuxUser(username)
}

// stringInSlice checks if a given string exists in a slice of strings.
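A minimal sketch of the resulting flow, using the fake manager from this diff so it runs without touching the host; the import paths and the example username are assumptions:

package main

import (
    "context"
    "fmt"
    "log"

    "github.com/edgelesssys/constellation/internal/deploy/user" // assumed import path
    "github.com/spf13/afero"
)

func main() {
    // The fake manager writes to an in-memory /etc/passwd via its stub creator.
    manager := user.NewLinuxUserManagerFake(afero.NewMemMapFs())

    // The first call creates the user; repeated calls stay idempotent because
    // ErrUserOrGroupAlreadyExists is tolerated and the existing entry is looked up.
    linuxUser, err := manager.EnsureLinuxUserExists(context.Background(), "admin")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(linuxUser.Home, linuxUser.UID, linuxUser.GID)
}
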
@ -145,3 +197,13 @@ func stringInSlice(a string, list []string) bool {
}
return false
}

// intInSlice checks if a given int exists in a slice of ints.
func intInSlice(a int, list []int) bool {
for _, b := range list {
if b == a {
return true
}
}
return false
}

@ -19,13 +19,13 @@ func TestGetLinuxUser(t *testing.T) {
wantUser LinuxUser
}{
"get works": {
passwdContents: "user:x:1000:1000:user:/home/user:/bin/bash\n",
passwdContents: "user:x:1000:1000:user:/var/home/user:/bin/bash\n",
wantErr: false,
wantUser: LinuxUser{
Username: "user",
Home: "/home/user",
Uid: 1000,
Gid: 1000,
Home: "/var/home/user",
UID: 1000,
GID: 1000,
},
},
"user does not exist": {
@ -37,11 +37,11 @@ func TestGetLinuxUser(t *testing.T) {
wantErr: true,
},
"invalid uid": {
passwdContents: "user:x:invalid:1000:user:/home/user:/bin/bash\n",
passwdContents: "user:x:invalid:1000:user:/var/home/user:/bin/bash\n",
wantErr: true,
},
"invalid gid": {
passwdContents: "user:x:1000:invalid:user:/home/user:/bin/bash\n",
passwdContents: "user:x:1000:invalid:user:/var/home/user:/bin/bash\n",
wantErr: true,
},
}
@ -54,7 +54,7 @@ func TestGetLinuxUser(t *testing.T) {
fs := afero.NewMemMapFs()
assert.NoError(afero.WriteFile(fs, "/etc/passwd", []byte(tc.passwdContents), 0o755))
manager := NewLinuxUserManagerFake(fs)
user, err := manager.getLinuxUser(username)
user, err := manager.GetLinuxUser(username)

if tc.wantErr {
assert.Error(err)
@ -79,9 +79,9 @@ func TestEnsureLinuxUserExists(t *testing.T) {
wantErr: false,
wantUser: LinuxUser{
Username: "user",
Home: "/home/user",
Uid: 1000,
Gid: 1000,
Home: "/var/home/user",
UID: 1000,
GID: 1000,
},
},
"create fails": {

@ -5,9 +5,10 @@ import (
"github.com/willdonnelly/passwd"
)

// An Entry contains all the fields for a specific user. Re-exported to allow other modules to only import this passwd module.
// Entries contains the information for each user defined in '/etc/passwd'. Re-exported to allow other modules to only import this passwd module.
type Entries map[string]passwd.Entry

// Passwd allows to parse users from '/etc/passwd' on the local system.
type Passwd struct{}

// Parse opens the '/etc/passwd' file and parses it into a map from usernames to Entries.
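For illustration, a small sketch of consuming the re-exported Entries map; the import path and the Entry.Home field (inherited from the upstream willdonnelly/passwd package) are assumptions, not taken from this diff:

package main

import (
    "fmt"
    "log"

    "github.com/edgelesssys/constellation/internal/deploy/passwd" // assumed import path
    "github.com/spf13/afero"
)

func main() {
    // Parse the host's /etc/passwd into a map keyed by username.
    p := passwd.Passwd{}
    entries, err := p.Parse(afero.NewOsFs())
    if err != nil {
        log.Fatal(err)
    }
    if entry, ok := entries["root"]; ok {
        fmt.Println("home directory of root:", entry.Home) // field assumed from willdonnelly/passwd
    }
}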