Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
14 changes: 14 additions & 0 deletions spread.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -115,6 +115,13 @@ environment:
USE_PREBUILT_SNAPD_SNAP: '$(HOST: echo "${SPREAD_USE_PREBUILT_SNAPD_SNAP:-false}")'
USE_SNAPD_SNAP_URL: '$(HOST: echo "${SPREAD_USE_SNAPD_SNAP_URL:-}")'

# When populated with a comma-separated list of feature key names,
# each spread task will be analyzed for the specified features and
# output a JSON file with the results. NOTE: this should only be
# used together with spread's -artifact option; otherwise the feature
# analyses will not be downloaded from the VMs.
TAG_FEATURES: '$(HOST: echo "${SPREAD_TAG_FEATURES:-}")'

backends:
google:
key: '$(HOST: echo "$SPREAD_GOOGLE_KEY")'
Expand Down Expand Up @@ -591,6 +598,10 @@ backends:

path: /home/gopath/src/github.com/snapcore/snapd

artifacts:
# Populated with feature tag information when SPREAD_TAG_FEATURES is set.
- feature-tags

exclude:
- .git
- cmd/snap/snap
Expand Down Expand Up @@ -1266,6 +1277,7 @@ suites:
snap install ubuntu-image --channel="$OLD_UBUNTU_IMAGE_SNAP_CHANNEL" --classic
fi
restore-each: |
"$TESTSLIB"/collect-features.sh --after-nested-task
tests.nested vm remove
tests.nested restore
"$TESTSLIB"/prepare-restore.sh --restore-suite-each
Expand Down Expand Up @@ -1326,6 +1338,7 @@ suites:
"$TESTSLIB"/prepare-restore.sh --prepare-suite-each
tests.nested create-vm classic
restore-each: |
"$TESTSLIB"/collect-features.sh --after-nested-task
tests.nested vm remove
"$TESTSLIB"/prepare-restore.sh --restore-suite-each
restore: |
Expand Down Expand Up @@ -1392,6 +1405,7 @@ suites:
"$TESTSLIB"/prepare-restore.sh --prepare-suite-each
tests.nested create-vm core
restore-each: |
"$TESTSLIB"/collect-features.sh --after-nested-task
tests.nested vm remove
"$TESTSLIB"/prepare-restore.sh --restore-suite-each
restore: |
Expand Down
40 changes: 40 additions & 0 deletions tests/lib/collect-features.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
#!/bin/bash

after_non_nested_task() {
    # Collect feature-tagging evidence from the host after a non-nested task:
    # the snapd TRACE journal lines and snapd's state.json. Files are written
    # under the feature-tags directory that spread.yaml lists as an artifact,
    # so spread downloads them from the VM when -artifact is used.
    local write_dir="${SPREAD_PATH}/feature-tags"
    # SPREAD_JOB contains '/' separators; flatten them into '--' so the job
    # name is usable as a single directory name.
    local task_dir="${write_dir}/${SPREAD_JOB//\//--}"
    # mkdir -p creates write_dir as a parent; no separate mkdir needed.
    mkdir -p "$task_dir"
    # grep exits non-zero when the task produced no TRACE lines; an empty
    # journal.txt is still valid output, so do not propagate that status.
    "$TESTSTOOLS"/journal-state get-log --no-pager --output cat | grep '"TRACE"' > "$task_dir"/journal.txt || true
    cp /var/lib/snapd/state.json "$task_dir"
}

after_nested_task() {
    # Collect feature-tagging evidence from inside the nested VM after a task:
    # the TRACE journal lines and snapd's state.json, pulled into the
    # feature-tags artifact directory on the (outer) test machine.
    local write_dir="${SPREAD_PATH}/feature-tags"
    # Flatten '/' in the job name so it can be used as a directory name.
    local task_dir="${write_dir}/${SPREAD_JOB//\//--}"
    # mkdir -p creates write_dir as a parent; no separate mkdir needed.
    mkdir -p "$task_dir"

    # Filter for TRACE lines on the VM side to keep the transferred data small.
    # grep exits non-zero when there are no matches; an empty journal.txt is
    # still valid output, so do not propagate that status.
    "$TESTSTOOLS"/remote.exec "sudo journalctl --no-pager --output cat | grep '\"TRACE\"'" > "$task_dir"/journal.txt || true
    # state.json is root-only inside the VM; remote.pull runs as the
    # unprivileged user, so it only needs read access — 644 is sufficient
    # (777 would make snapd's state world-writable for no benefit).
    "$TESTSTOOLS"/remote.exec "sudo chmod 644 /var/lib/snapd/state.json"
    "$TESTSTOOLS"/remote.pull "/var/lib/snapd/state.json" "$task_dir"
}


# Dispatch on the phase flag passed by spread.yaml's restore-each sections.
# Collection is a no-op unless feature tagging was requested by setting
# SPREAD_TAG_FEATURES (surfaced to tasks as TAG_FEATURES); callers can
# therefore invoke this script unconditionally.
case "$1" in
    --after-non-nested-task)
        if [ -n "${TAG_FEATURES:-}" ]; then
            after_non_nested_task
        fi
        ;;
    --after-nested-task)
        if [ -n "${TAG_FEATURES:-}" ]; then
            after_nested_task
        fi
        ;;
    *)
        # Diagnostics belong on stderr, not stdout.
        echo "unsupported argument: $1" >&2
        exit 1
        ;;
esac
15 changes: 15 additions & 0 deletions tests/lib/nested.sh
Original file line number Diff line number Diff line change
Expand Up @@ -1704,6 +1704,21 @@ nested_prepare_tools() {
REMOTE_PATH="$(remote.exec 'echo $PATH')"
remote.exec "echo PATH=$TOOLS_PATH:$REMOTE_PATH | sudo tee -a /etc/environment"
fi

if [ -n "$TAG_FEATURES" ]; then
# If feature tagging is enabled, then we need to enable debug logging
remote.exec "sudo mkdir -p /etc/systemd/system/snapd.service.d"
remote.exec "printf '[Service]\nEnvironment=SNAPD_DEBUG_HTTP=7 SNAPD_DEBUG=1 SNAPPY_TESTING=1\n' | sudo tee /etc/systemd/system/snapd.service.d/99-feature-tags.conf"
# Persist journal logs
remote.exec "sudo snap set system journal.persistent=true"
# We changed the service configuration so we need to reload and restart
# the units to get them applied
remote.exec "sudo systemctl daemon-reload"
# stop the socket (it pulls down the service)
remote.exec "sudo systemctl stop snapd.socket"
# start the service (it pulls up the socket)
remote.exec "sudo systemctl start snapd.service"
fi
}

nested_add_tty_chardev() {
Expand Down
7 changes: 7 additions & 0 deletions tests/lib/prepare-restore.sh
Original file line number Diff line number Diff line change
Expand Up @@ -648,6 +648,10 @@ prepare_suite() {
prepare_classic
fi

if [ -n "$TAG_FEATURES" ]; then
snap set system journal.persistent=true
fi

# Make sure the suite starts with a clean environment and with the snapd state restored
# shellcheck source=tests/lib/reset.sh
"$TESTSLIB"/reset.sh --reuse-core
Expand Down Expand Up @@ -733,6 +737,9 @@ prepare_suite_each() {
}

restore_suite_each() {
if not tests.nested is-nested; then
"$TESTSLIB"/collect-features.sh --after-non-nested-task
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

we should do this just when SPREAD_TAG_FEATURES is set

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The collect-features.sh script checks if TAG_FEATURES is set before actually calling the function. I thought that was easier than checking in all the places the script is called. If you think the check should be outside, I can move it outside

fi
local variant="$1"

rm -f "$RUNTIME_STATE_PATH/audit-stamp"
Expand Down
70 changes: 70 additions & 0 deletions tests/lib/tools/feature_extractor.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,70 @@
#!/usr/bin/env python3

import argparse
from collections import defaultdict
import json
from typing import Any, TextIO


# This will be removed
# Placeholder feature extractor used until real feature classes are added;
# it records every TRACE journal entry verbatim under the "all" key.
# This will be removed once concrete feature extractors exist.
class AllFeature:
    name = "all"
    parent = "all"

    @staticmethod
    def maybe_add_feature(feature_dict: dict[str, list[Any]], json_entry: dict[str, Any], state_json: dict[str, Any]):
        # Unconditionally record the entry; real extractors will inspect the
        # entry (and consult state_json) before deciding to add anything.
        feature_dict[AllFeature.parent].append({AllFeature.name: json_entry})


# Registry of all known feature extractor classes, selectable by name.
FEATURE_LIST = [AllFeature]


def get_feature_dictionary(log_file: TextIO, feature_list: list[str], state_json: dict[str, Any]):
    '''
    Extracts features from the journal entries and places them in a dictionary.

    :param log_file: iterator of journal entries, one json document per line
    :param feature_list: list of feature names to extract; None or empty
        selects no extractors and yields an empty dictionary
    :param state_json: dictionary of a state.json
    :return: dictionary of features, keyed by feature parent name
    :raises ValueError: if an invalid feature name is provided
    :raises RuntimeError: if a non-blank line could not be parsed as json
    '''
    feature_dict = defaultdict(list)
    # argparse yields None when no -f/--feature flags were given; treat that
    # the same as an empty selection instead of crashing on 'in None'.
    requested = list(feature_list or [])
    feature_classes = [cls for cls in FEATURE_LIST
                       if cls.name in requested]
    if len(feature_classes) != len(requested):
        raise ValueError(
            "Error: Invalid feature name in feature list {}".format(requested))

    for line in log_file:
        # Tolerate blank lines (e.g. a trailing newline in journal.txt).
        if not line.strip():
            continue
        try:
            line_json = json.loads(line)
        except json.JSONDecodeError as exc:
            # Chain the original error so the offending position is kept.
            raise RuntimeError("Could not parse line as json: {}".format(line)) from exc
        # Each extractor decides for itself whether the entry is relevant.
        for feature_class in feature_classes:
            feature_class.maybe_add_feature(feature_dict, line_json, state_json)
    return feature_dict


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="""Given a set of features with journal entries, each in json format, and a
        state.json, this script will search the text file and extract the features. Those
        features will be saved in a dictionary and written to the indicated file in output.""")
    parser.add_argument('-o', '--output', help='Output file', required=True)
    parser.add_argument(
        '-f', '--feature', help='Features to extract from journal {all}; can be repeated multiple times', nargs='+')
    parser.add_argument(
        '-j', '--journal', help='Text file containing journal entries', required=True, type=argparse.FileType('r'))
    parser.add_argument(
        '-s', '--state', help='state.json', required=True, type=argparse.FileType('r'))
    args = parser.parse_args()

    # Keep the try narrow: only json.load(args.state) can raise
    # JSONDecodeError here; journal parse errors are already converted to
    # RuntimeError inside get_feature_dictionary.
    try:
        state_json = json.load(args.state)
    except json.JSONDecodeError as exc:
        raise RuntimeError("The state.json is not valid json") from exc

    # argparse stores '--feature' under the dest 'feature' (the original
    # 'args.features' raised AttributeError on every run).
    feature_dictionary = get_feature_dictionary(args.journal, args.feature, state_json)
    # Use a context manager so the output file is flushed and closed.
    with open(args.output, "w") as output_file:
        json.dump(feature_dictionary, output_file)
1 change: 0 additions & 1 deletion tests/nested/manual/hybrid-remodel/task.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -149,7 +149,6 @@ prepare: |
restore: |
systemctl stop fakedevicesvc.service || true
"${TESTSTOOLS}/store-state" teardown-fake-store "${NESTED_FAKESTORE_BLOB_DIR}"
tests.systemd stop-unit --remove "nested-vm"

debug: |
journalctl -u fakestore.service
Expand Down
Loading