Skip to content

Commit

Permalink
tests: build and push locally for metal tests
Browse files — browse the repository at this point in the history
Because we now push much larger files, replace our in-memory storage with disk-backed storage.
  • Loading branch information
justinsb committed Sep 11, 2024
1 parent 0671e77 commit 774d2fd
Show file tree
Hide file tree
Showing 9 changed files with 439 additions and 33 deletions.
62 changes: 62 additions & 0 deletions hack/dev-build-metal.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,62 @@
#!/bin/bash

# Copyright 2024 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This is a convenience script for developing kOps on Metal.
# It builds the code, including nodeup, and uploads to our fake S3 storage server.
# It also sets KOPS_BASE_URL to point to that storage server.
# To use, source the script. For example `. hack/dev-build-metal.sh` (note the initial `.`)

# Can't use set -e in a script we want to source
#set -e

#set -x

REPO_ROOT=$(git rev-parse --show-toplevel)
cd "${REPO_ROOT}" || return

# Dev environments typically do not need to test multiple architectures
KOPS_ARCH=amd64
export KOPS_ARCH

# Configure aws cli to talk to local storage
aws configure --profile metal set aws_access_key_id accesskey
aws configure --profile metal set aws_secret_access_key secret
aws configure --profile metal set aws_region us-east-1
aws configure --profile metal set endpoint_url http://10.123.45.1:8443
export AWS_ENDPOINT_URL=http://10.123.45.1:8443
export AWS_PROFILE=metal
export AWS_REGION=us-east-1

# Avoid chunking in S3 uploads (not supported by our mock yet)
aws configure --profile metal set s3.multipart_threshold 64GB

export UPLOAD_DEST=s3://kops-dev-build/
aws --version
# Create the bucket if it does not already exist
aws s3 ls "${UPLOAD_DEST}" || aws s3 mb "${UPLOAD_DEST}" || return
make kops-install dev-version-dist-"${KOPS_ARCH}" || return

hack/upload .build/upload/ "${UPLOAD_DEST}" || return

# Set KOPS_BASE_URL
# Note: the pipeline's exit status is awk's (which succeeds even when grep
# matches nothing), so we must check for an empty result explicitly rather
# than relying on `|| return`.
KOPS_VERSION=$(tools/get_version.sh | grep VERSION | awk '{print $2}')
if [[ -z "${KOPS_VERSION}" ]]; then
  echo "failed to determine kops version from tools/get_version.sh"
  return
fi
export KOPS_BASE_URL=http://10.123.45.1:8443/kops-dev-build/kops/${KOPS_VERSION}/
echo "set KOPS_BASE_URL=${KOPS_BASE_URL}"

# Set feature flags needed on Metal
# export KOPS_FEATURE_FLAGS=

echo "SUCCESS"
3 changes: 2 additions & 1 deletion tests/e2e/scenarios/bare-metal/dump-artifacts
Original file line number Diff line number Diff line change
Expand Up @@ -51,5 +51,6 @@ for vm in 0 1 2; do
vm_name="vm${vm}"
mkdir -p ${ARTIFACTS}/vms/${vm_name}/logs/
scp -o StrictHostKeyChecking=accept-new -i ${REPO_ROOT}/.build/.ssh/id_ed25519 [email protected]:/var/log/etcd* ${ARTIFACTS}/vms/${vm_name}/logs/ || true
ssh -o StrictHostKeyChecking=accept-new -i ${REPO_ROOT}/.build/.ssh/id_ed25519 [email protected] journalctl --no-pager -u kubelet 2>&1 > ${ARTIFACTS}/vms/${vm_name}/logs/journal-kubelet.service || true
ssh -o StrictHostKeyChecking=accept-new -i ${REPO_ROOT}/.build/.ssh/id_ed25519 [email protected] journalctl --no-pager -u kubelet 2>&1 > ${ARTIFACTS}/vms/${vm_name}/logs/kubelet.service || true
ssh -o StrictHostKeyChecking=accept-new -i ${REPO_ROOT}/.build/.ssh/id_ed25519 [email protected] journalctl --no-pager -u kops-configuration 2>&1 > ${ARTIFACTS}/vms/${vm_name}/logs/kops-configuration.service || true
done
18 changes: 14 additions & 4 deletions tests/e2e/scenarios/bare-metal/run-test
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,10 @@ set -o xtrace
REPO_ROOT=$(git rev-parse --show-toplevel)
cd ${REPO_ROOT}

BINDIR=${REPO_ROOT}/.build/bin
WORKDIR=${REPO_ROOT}/.build/

BINDIR=${WORKDIR}/bin
mkdir -p "${BINDIR}"
go build -o ${BINDIR}/kops ./cmd/kops

KOPS=${BINDIR}/kops
Expand All @@ -37,10 +40,17 @@ function cleanup() {

trap cleanup EXIT

# Create the directory that will back our mock s3 storage
rm -rf ${WORKDIR}/s3
mkdir -p ${WORKDIR}/s3/

# Start our VMs
${REPO_ROOT}/tests/e2e/scenarios/bare-metal/start-vms

echo "Waiting 30 seconds for VMs to start"
sleep 30
. hack/dev-build-metal.sh

echo "Waiting 10 seconds for VMs to start"
sleep 10

# Remove from known-hosts in case of reuse
ssh-keygen -f ~/.ssh/known_hosts -R 10.123.45.10 || true
Expand Down Expand Up @@ -69,7 +79,7 @@ export S3_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY}
# Create the state-store bucket in our mock s3 server
export KOPS_STATE_STORE=s3://kops-state-store/
aws --version
aws --endpoint-url=${S3_ENDPOINT} s3 mb s3://kops-state-store
aws s3 ls s3://kops-state-store || aws s3 mb s3://kops-state-store

# List clusters (there should not be any yet)
${KOPS} get cluster || true
Expand Down
13 changes: 8 additions & 5 deletions tests/e2e/scenarios/bare-metal/start-vms
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,8 @@ REPO_ROOT=$(git rev-parse --show-toplevel)
cd ${REPO_ROOT}/tests/e2e/scenarios/bare-metal

WORKDIR=${REPO_ROOT}/.build
BINDIR=${WORKDIR}/bin
mkdir -p $BINDIR

# Create SSH keys
mkdir -p ${WORKDIR}/.ssh
Expand All @@ -32,12 +34,12 @@ fi

# Build software we need
cd ${REPO_ROOT}/tools/metal/dhcp
go build -o ${WORKDIR}/dhcp .
go build -o ${BINDIR}/dhcp .
cd ${REPO_ROOT}/tools/metal/storage
go build -o ${WORKDIR}/storage .
go build -o ${BINDIR}/storage .

# Give permission to listen on ports < 1024 (sort of like a partial suid binary)
sudo setcap cap_net_bind_service=ep ${WORKDIR}/dhcp
sudo setcap cap_net_bind_service=ep ${BINDIR}/dhcp

# Install software we need
if ! genisoimage --version; then
Expand Down Expand Up @@ -110,7 +112,7 @@ After=network.target
EnvironmentFile=/etc/environment
Type=exec
WorkingDirectory=${WORKDIR}/
ExecStart=${WORKDIR}/dhcp
ExecStart=${BINDIR}/dhcp
Restart=always
[Install]
Expand All @@ -121,6 +123,7 @@ EOF
systemctl --user enable --now qemu-dhcp.service
}


function start_storage() {
mkdir -p ~/.config/systemd/user
cat <<EOF > ~/.config/systemd/user/qemu-storage.service
Expand All @@ -132,7 +135,7 @@ After=network.target
EnvironmentFile=/etc/environment
Type=exec
WorkingDirectory=${WORKDIR}/
ExecStart=${WORKDIR}/storage --http-listen=10.123.45.1:8443
ExecStart=${BINDIR}/storage --http-listen=10.123.45.1:8443 --storage-dir=${WORKDIR}/s3/
Restart=always
[Install]
Expand Down
52 changes: 48 additions & 4 deletions tools/metal/storage/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ import (
"strings"

"github.com/kubernetes/kops/tools/metal/dhcp/pkg/objectstore"
"github.com/kubernetes/kops/tools/metal/dhcp/pkg/objectstore/testobjectstore"
"github.com/kubernetes/kops/tools/metal/dhcp/pkg/objectstore/fsobjectstore"
"github.com/kubernetes/kops/tools/metal/dhcp/pkg/s3model"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
Expand All @@ -47,13 +47,22 @@ func run(ctx context.Context) error {

httpListen := ""
flag.StringVar(&httpListen, "http-listen", httpListen, "endpoint on which to serve HTTP requests")

storageDir := ""
flag.StringVar(&storageDir, "storage-dir", storageDir, "directory in which to store data")

flag.Parse()

if httpListen == "" {
return fmt.Errorf("must specify http-listen flag")
}

store := testobjectstore.New()
if storageDir == "" {
return fmt.Errorf("must specify storage-dir flag")
}

// store := testobjectstore.New()
store := fsobjectstore.New(storageDir)

s3Server := &S3Server{
store: store,
Expand Down Expand Up @@ -88,7 +97,12 @@ type S3Server struct {
func (s *S3Server) ListAllMyBuckets(ctx context.Context, req *s3Request, r *ListAllMyBucketsInput) error {
output := &s3model.ListAllMyBucketsResult{}

for _, bucket := range s.store.ListBuckets(ctx) {
buckets, err := s.store.ListBuckets(ctx)
if err != nil {
return fmt.Errorf("listing buckets: %w", err)
}

for _, bucket := range buckets {
output.Buckets = append(output.Buckets, s3model.Bucket{
CreationDate: bucket.CreationDate.Format(s3TimeFormat),
Name: bucket.Name,
Expand Down Expand Up @@ -156,6 +170,12 @@ func (s *S3Server) ServeRequest(ctx context.Context, w http.ResponseWriter, r *h
Bucket: bucket,
Key: key,
})
case http.MethodHead:
// GetObject can handle req.Method == HEAD
return s.GetObject(ctx, req, &GetObjectInput{
Bucket: bucket,
Key: key,
})
case http.MethodPut:
return s.PutObject(ctx, req, &PutObjectInput{
Bucket: bucket,
Expand All @@ -180,6 +200,8 @@ type ListObjectsV2Input struct {
const s3TimeFormat = "2006-01-02T15:04:05.000Z"

func (s *S3Server) ListObjectsV2(ctx context.Context, req *s3Request, input *ListObjectsV2Input) error {
log := klog.FromContext(ctx)

bucket, _, err := s.store.GetBucket(ctx, input.Bucket)
if err != nil {
return fmt.Errorf("failed to get bucket %q: %w", input.Bucket, err)
Expand All @@ -200,18 +222,40 @@ func (s *S3Server) ListObjectsV2(ctx context.Context, req *s3Request, input *Lis
Name: input.Bucket,
}

prefixes := make(map[string]bool)
for _, object := range objects {
log.V(4).Info("found candidate object", "obj", object)
if input.Prefix != "" && !strings.HasPrefix(object.Key, input.Prefix) {
continue
}
if input.Delimiter != "" {
afterPrefix := object.Key[len(input.Prefix):]

tokens := strings.SplitN(afterPrefix, input.Delimiter, 2)
if len(tokens) == 2 {
prefixes[input.Prefix+tokens[0]+input.Delimiter] = true
continue
}
}
// TODO: support delimiter
output.Contents = append(output.Contents, s3model.Object{
Key: object.Key,
LastModified: object.LastModified.Format(s3TimeFormat),
Size: object.Size,
})
}

if input.Delimiter != "" {
for prefix := range prefixes {
output.CommonPrefixes = append(output.CommonPrefixes, s3model.CommonPrefix{
Prefix: prefix,
})
}
output.Delimiter = input.Delimiter
}
output.Prefix = input.Prefix
output.KeyCount = len(output.Contents)
output.IsTruncated = false

return req.writeXML(ctx, output)
}
Expand Down Expand Up @@ -270,7 +314,7 @@ func (s *S3Server) GetObject(ctx context.Context, req *s3Request, input *GetObje
})
}

return object.WriteTo(req.w)
return object.WriteTo(req.r, req.w)
}

type GetObjectACLInput struct {
Expand Down
Loading

0 comments on commit 774d2fd

Please sign in to comment.