Compare commits


No commits in common. "master" and "4.11" have entirely different histories.
master ... 4.11

199 changed files with 1690 additions and 11793 deletions
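For reference, this comparison can be reproduced in a local checkout; a minimal sketch, assuming a clone of the cdist repository with a remote named origin that carries both refs:

    # fetch both branches, then diff them directly
    # (a plain two-ref diff, since the branches share no merge base)
    git fetch origin
    git diff --stat origin/master origin/4.11   # per-file summary of changes
    git diff origin/master origin/4.11          # full diff, as listed below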

.gitattributes (vendored): 2 lines changed

@ -4,5 +4,3 @@
docs/speeches export-ignore
docs/video export-ignore
docs/src/man7 export-ignore
bin/build-helper export-ignore
README-maintainers export-ignore

.gitignore (vendored): 1 line changed

@ -12,7 +12,6 @@ Session.vim
# Temporary
.netrwhist
*~
*.tmp
# Auto-generated tag files
tags
# Persistent undo

Makefile: 219 lines changed

@ -18,30 +18,36 @@
#
#
.PHONY: help
help:
@echo "Please use \`make <target>' where <target> is one of"
@echo "man build only man user documentation"
@echo "html build only html user documentation"
@echo "docs build both man and html user documentation"
@echo "dotman build man pages for types in your ~/.cdist directory"
@echo "speeches build speeches pdf files"
@echo "install install in the system site-packages directory"
@echo "install-user install in the user site-packages directory"
@echo "docs-clean clean documentation"
@echo "clean clean"
helper=./bin/build-helper
DOCS_SRC_DIR=./docs/src
SPEECHDIR=./docs/speeches
TYPEDIR=./cdist/conf/type
DOCS_SRC_DIR=docs/src
SPEECHDIR=docs/speeches
TYPEDIR=cdist/conf/type
WEBSRCDIR=docs/web
WEBDIR=$$HOME/vcs/www.nico.schottelius.org
WEBBLOG=$(WEBDIR)/blog
WEBBASE=$(WEBDIR)/software/cdist
WEBPAGE=$(WEBBASE).mdwn
CHANGELOG_VERSION=$(shell $(helper) changelog-version)
CHANGELOG_FILE=docs/changelog
PYTHON_VERSION=cdist/version.py
SPHINXM=make -C $(DOCS_SRC_DIR) man
SPHINXH=make -C $(DOCS_SRC_DIR) html
SPHINXC=make -C $(DOCS_SRC_DIR) clean
SHELLCHECKCMD=shellcheck -s sh -f gcc -x
# Skip SC2154 for variables starting with __ since such variables are cdist
# environment variables.
SHELLCHECK_SKIP=grep -v ': __.*is referenced but not assigned.*\[SC2154\]'
################################################################################
# Manpages
#
MAN1DSTDIR=$(DOCS_SRC_DIR)/man1
MAN7DSTDIR=$(DOCS_SRC_DIR)/man7
# Manpages #1: Types
@ -63,16 +69,11 @@ DOCSREFSH=$(DOCS_SRC_DIR)/cdist-reference.rst.sh
$(DOCSREF): $(DOCSREFSH)
$(DOCSREFSH)
version:
@[ -f "cdist/version.py" ] || { \
printf "Missing 'cdist/version.py', please generate it first.\n" && exit 1; \
}
# Manpages #3: generic part
man: version $(MANTYPES) $(DOCSREF)
man: $(MANTYPES) $(DOCSREF) $(PYTHON_VERSION)
$(SPHINXM)
html: version $(MANTYPES) $(DOCSREF)
html: $(MANTYPES) $(DOCSREF) $(PYTHON_VERSION)
$(SPHINXH)
docs: man html
@ -80,6 +81,24 @@ docs: man html
docs-clean:
$(SPHINXC)
# Manpages #5: release part
MANWEBDIR=$(WEBBASE)/man/$(CHANGELOG_VERSION)
HTMLBUILDDIR=docs/dist/html
docs-dist: html
rm -rf "${MANWEBDIR}"
mkdir -p "${MANWEBDIR}"
# mkdir -p "${MANWEBDIR}/man1" "${MANWEBDIR}/man7"
# cp ${MAN1DSTDIR}/*.html ${MAN1DSTDIR}/*.css ${MANWEBDIR}/man1
# cp ${MAN7DSTDIR}/*.html ${MAN7DSTDIR}/*.css ${MANWEBDIR}/man7
cp -R ${HTMLBUILDDIR}/* ${MANWEBDIR}
cd ${MANWEBDIR} && git add . && git commit -m "cdist manpages update: $(CHANGELOG_VERSION)" || true
man-latest-link: web-pub
# Fix ikiwiki, which does not like symlinks for pseudo security
ssh staticweb.ungleich.ch \
"cd /home/services/www/nico/nico.schottelius.org/www/software/cdist/man/ && rm -f latest && ln -sf "$(CHANGELOG_VERSION)" latest"
# Manpages: .cdist Types
DOT_CDIST_PATH=${HOME}/.cdist
DOTMAN7DSTDIR=$(MAN7DSTDIR)
@ -92,7 +111,8 @@ DOTMANTYPES=$(subst /man.rst,.rst,$(DOTMANTYPEPREFIX))
$(DOTMAN7DSTDIR)/cdist-type%.rst: $(DOTTYPEDIR)/%/man.rst
ln -sf "$^" $@
dotman: version $(DOTMANTYPES)
# Manpages #3: generic part
dotman: $(DOTMANTYPES)
$(SPHINXM)
################################################################################
@ -100,6 +120,7 @@ dotman: version $(DOTMANTYPES)
#
SPEECHESOURCES=$(SPEECHDIR)/*.tex
SPEECHES=$(SPEECHESOURCES:.tex=.pdf)
SPEECHESWEBDIR=$(WEBBASE)/speeches
# Create speeches and ensure Toc is up-to-date
$(SPEECHDIR)/%.pdf: $(SPEECHDIR)/%.tex
@ -109,26 +130,160 @@ $(SPEECHDIR)/%.pdf: $(SPEECHDIR)/%.tex
speeches: $(SPEECHES)
speeches-dist: speeches
rm -rf "${SPEECHESWEBDIR}"
mkdir -p "${SPEECHESWEBDIR}"
cp ${SPEECHES} "${SPEECHESWEBDIR}"
cd ${SPEECHESWEBDIR} && git add . && git commit -m "cdist speeches updated" || true
################################################################################
# Misc
# Website
#
clean: docs-clean
BLOGFILE=$(WEBBLOG)/cdist-$(CHANGELOG_VERSION)-released.mdwn
$(BLOGFILE): $(CHANGELOG_FILE)
$(helper) blog $(CHANGELOG_VERSION) $(BLOGFILE)
web-blog: $(BLOGFILE)
web-doc:
# Go to top level, because of cdist.mdwn
rsync -av "$(WEBSRCDIR)/" "${WEBBASE}/.."
cd "${WEBBASE}/.." && git add cdist* && git commit -m "cdist doc update" cdist* || true
web-dist: web-blog web-doc
web-pub: web-dist docs-dist speeches-dist
cd "${WEBDIR}" && make pub
web-release-all: man-latest-link
web-release-all-no-latest: web-pub
################################################################################
# Release: Mailinglist
#
ML_FILE=.lock-ml
# Only send mail once - lock until new changelog things happened
$(ML_FILE): $(CHANGELOG_FILE)
$(helper) ml-release $(CHANGELOG_VERSION)
touch $@
ml-release: $(ML_FILE)
################################################################################
# pypi
#
PYPI_FILE=.pypi-release
$(PYPI_FILE): man $(PYTHON_VERSION)
python3 setup.py sdist upload
touch $@
pypi-release: $(PYPI_FILE)
################################################################################
# archlinux
#
ARCHLINUX_FILE=.lock-archlinux
ARCHLINUXTAR=cdist-$(CHANGELOG_VERSION)-1.src.tar.gz
$(ARCHLINUXTAR): PKGBUILD
umask 022; mkaurball
PKGBUILD: PKGBUILD.in $(PYTHON_VERSION)
./PKGBUILD.in $(CHANGELOG_VERSION)
$(ARCHLINUX_FILE): $(ARCHLINUXTAR) $(PYTHON_VERSION)
burp -c system $(ARCHLINUXTAR)
touch $@
archlinux-release: $(ARCHLINUX_FILE)
################################################################################
# Release
#
$(PYTHON_VERSION) version: .git/refs/heads/master
$(helper) version
# Code that is better handled in a shell script
check-%:
$(helper) $@
release:
$(helper) $@
################################################################################
# Cleanup
#
clean:
rm -f $(DOCS_SRC_DIR)/cdist-reference.rst
find "$(DOCS_SRC_DIR)" -mindepth 2 -type l \
| xargs rm -f
make -C $(DOCS_SRC_DIR) clean
find * -name __pycache__ | xargs rm -rf
# distutils
rm -rf ./build
# Archlinux
rm -f cdist-*.pkg.tar.xz cdist-*.tar.gz
rm -rf pkg/ src/
rm -f MANIFEST PKGBUILD
rm -rf dist/
# Signed release
rm -f cdist-*.tar.gz
rm -f cdist-*.tar.gz.asc
distclean: clean
rm -f cdist/version.py
################################################################################
# install
# Misc
#
install:
python3 setup.py install
# The pub is Nico's "push to all git remotes" way ("make pub")
pub:
git push --mirror
install-user:
python3 setup.py install --user
test:
$(helper) $@
test-remote:
$(helper) $@
pycodestyle pep8:
$(helper) $@
shellcheck-global-explorers:
@find cdist/conf/explorer -type f -exec $(SHELLCHECKCMD) {} + | $(SHELLCHECK_SKIP) || exit 0
shellcheck-type-explorers:
@find cdist/conf/type -type f -path "*/explorer/*" -exec $(SHELLCHECKCMD) {} + | $(SHELLCHECK_SKIP) || exit 0
shellcheck-manifests:
@find cdist/conf/type -type f -name manifest -exec $(SHELLCHECKCMD) {} + | $(SHELLCHECK_SKIP) || exit 0
shellcheck-local-gencodes:
@find cdist/conf/type -type f -name gencode-local -exec $(SHELLCHECKCMD) {} + | $(SHELLCHECK_SKIP) || exit 0
shellcheck-remote-gencodes:
@find cdist/conf/type -type f -name gencode-remote -exec $(SHELLCHECKCMD) {} + | $(SHELLCHECK_SKIP) || exit 0
shellcheck-scripts:
@$(SHELLCHECKCMD) scripts/cdist-dump || exit 0
shellcheck-gencodes: shellcheck-local-gencodes shellcheck-remote-gencodes
shellcheck-types: shellcheck-type-explorers shellcheck-manifests shellcheck-gencodes
shellcheck: shellcheck-global-explorers shellcheck-types shellcheck-scripts
shellcheck-type-files:
@find cdist/conf/type -type f -path "*/files/*" -exec $(SHELLCHECKCMD) {} + | $(SHELLCHECK_SKIP) || exit 0
shellcheck-with-files: shellcheck shellcheck-type-files
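
The shellcheck targets added on the master side are ordinary make targets; a usage sketch, assuming GNU make and shellcheck are installed locally:

    # run explorers, manifests, gencode scripts and scripts/cdist-dump through shellcheck
    make shellcheck
    # additionally check scripts shipped under the types' files/ directories
    make shellcheck-with-files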

View file

@ -9,7 +9,7 @@ pkgver=$version
pkgrel=1
pkgdesc='A Usable Configuration Management System"'
arch=('any')
url='https://www.cdi.st/'
url='http://www.nico.schottelius.org/software/cdist/'
license=('GPL3')
depends=('python>=3.2.0')
source=("http://pypi.python.org/packages/source/c/cdist/cdist-\${pkgver}.tar.gz")

README: 3 lines changed

@ -3,5 +3,4 @@ cdist
cdist is a usable configuration management system.
For the web documentation have a look at https://www.cdi.st/
or at docs/src for reStructuredText manual.
For the web documentation have a look at docs/web/.

View file

@ -1,4 +0,0 @@
Maintainers should use ./bin/build-helper script.
Makefile is intended for end users. It can be used for non-maintaining
targets that can be run from pure source (without git repository).

View file

@ -1,7 +1,6 @@
#!/bin/sh
#
# 2011-2013 Nico Schottelius (nico-cdist at schottelius.org)
# 2016-2019 Darko Poljak (darko.poljak at gmail.com)
#
# This file is part of cdist.
#
@ -19,66 +18,17 @@
# along with cdist. If not, see <http://www.gnu.org/licenses/>.
#
#
# This file contains the heavy lifting found usually in the Makefile.
# This file contains the heavy lifting found usually in the Makefile
#
usage() {
printf "usage: %s TARGET [TARGET-ARGS...]
Available targets:
changelog-changes
changelog-version
check-date
check-unittest
ml-release
archlinux-release
pypi-release
release-git-tag
sign-git-release
release
test
test-remote
pycodestyle
pep8
check-pycodestyle
shellcheck-global-explorers
shellcheck-type-explorers
shellcheck-manifests
shellcheck-local-gencodes
shellcheck-remote-gencodes
shellcheck-scripts
shellcheck-gencodes
shellcheck-types
shellcheck
shellcheck-type-files
shellcheck-with-files
shellcheck-build-helper
check-shellcheck
version-branch
version
target-version
clean
distclean\n" "$1"
}
basedir=${0%/*}/../
# Change to checkout directory
cd "$basedir"
basename="${0##*/}"
if [ $# -lt 1 ]
then
usage "${basename}"
exit 1
fi
version=$(git describe)
option=$1; shift
SHELLCHECKCMD="shellcheck -s sh -f gcc -x"
# Skip SC2154 for variables starting with __ since such variables are cdist
# environment variables.
SHELLCHECK_SKIP=': __.*is referenced but not assigned.*\[SC2154\]'
# Change to checkout directory
basedir="${0%/*}/../"
cd "$basedir"
case "$option" in
changelog-changes)
if [ "$#" -eq 1 ]; then
@ -116,8 +66,8 @@ case "$option" in
date_changelog=$(grep '^[[:digit:]]' "$basedir/docs/changelog" | head -n1 | sed 's/.*: //')
if [ "$date_today" != "$date_changelog" ]; then
printf "Date in changelog is not today\n"
printf "Changelog date: %s\n" "${date_changelog}"
echo "Date in changelog is not today"
echo "Changelog: $date_changelog"
exit 1
fi
;;
@ -126,17 +76,54 @@ case "$option" in
"$0" test
;;
blog)
version=$1; shift
blogfile=$1; shift
dir=${blogfile%/*}
file=${blogfile##*/}
cat << eof > "$blogfile"
[[!meta title="Cdist $version released"]]
Here's a short overview about the changes found in version ${version}:
eof
$0 changelog-changes "$version" >> "$blogfile"
cat << eof >> "$blogfile"
For more information visit the [[cdist homepage|software/cdist]].
[[!tag cdist config unix]]
eof
cd "$dir"
git add "$file"
# Allow git commit to fail if there are no changes
git commit -m "cdist blog update: $version" "$blogfile" || true
;;
ml-release)
if [ $# -ne 1 ]; then
printf "%s ml-release version\n" "$0" >&2
echo "$0 ml-release version" >&2
exit 1
fi
version=$1; shift
to_a=cdist
to_d=l.schottelius.org
to=${to_a}@${to_d}
from_a=nico-cdist
from_d=schottelius.org
from=${from_a}@${from_d}
(
cat << eof
Subject: cdist $version has been released
From: Nico -telmich- Schottelius <$from>
To: cdist mailing list <$to>
Subject: cdist $version released
Hello .*,
@ -147,41 +134,25 @@ eof
"$0" changelog-changes "$version"
cat << eof
Cheers,
Nico
--
Automatisation at its best level. With cdist.
eof
) > mailinglist.tmp
) | /usr/sbin/sendmail -f "$from" "$to"
;;
archlinux-release)
if [ $# -ne 1 ]; then
printf "%s archlinux-release version\n" "$0" >&2
exit 1
fi
version=$1; shift
ARCHLINUXTAR="cdist-${version}-1.src.tar.gz"
./PKGBUILD.in "${version}"
umask 022
mkaurball
burp -c system "${ARCHLINUXTAR}"
;;
pypi-release)
# Ensure that pypi release has the right version
"$0" version
make docs-clean
make docs
python3 setup.py sdist upload
;;
release-git-tag)
target_version=$($0 changelog-version)
if git rev-parse --verify "refs/tags/${target_version}" 2>/dev/null; then
printf "Tag for %s exists, aborting\n" "${target_version}"
if git rev-parse --verify refs/tags/$target_version 2>/dev/null; then
echo "Tag for $target_version exists, aborting"
exit 1
fi
printf "Enter tag description for %s: " "${target_version}"
read -r tagmessage
printf "Enter tag description for ${target_version}: "
read tagmessage
# setup for signed tags:
# gpg --fulL-gen-key
@ -199,8 +170,7 @@ eof
# gpg --verify <asc-file> <file>
# gpg --no-default-keyring --keyring <pubkey.gpg> --verify <asc-file> <file>
# Ensure gpg-agent is running.
GPG_TTY=$(tty)
export GPG_TTY
export GPG_TTY=$(tty)
gpg-agent
git tag -s "$target_version" -m "$tagmessage"
@ -210,14 +180,14 @@ eof
sign-git-release)
if [ $# -lt 2 ]
then
printf "usage: %s sign-git-release TAG TOKEN [ARCHIVE]\n" "$0"
printf "usage: $0 sign-git-release TAG TOKEN [ARCHIVE]\n"
printf " if ARCHIVE is not specified then it is created\n"
exit 1
fi
tag="$1"
if ! git rev-parse -q --verify "${tag}" >/dev/null 2>&1
then
printf "Tag \"%s\" not found.\n" "${tag}"
printf "Tag \"${tag}\" not found.\n"
exit 1
fi
token="$2"
@ -239,35 +209,34 @@ eof
fi
gpg --armor --detach-sign "${archivename}" || exit 1
project="ungleich-public%2Fcdist"
sed_cmd='s/^.*"markdown":"\([^"]*\)".*$/\1/'
# make github release
curl -H "Authorization: token ${token}" \
--request POST \
--data "{ \"tag_name\":\"${tag}\", \
\"target_commitish\":\"master\", \
\"name\": \"${tag}\", \
\"body\":\"${tag}\", \
\"draft\":false, \
\"prerelease\": false}" \
"https://api.github.com/repos/ungleich/cdist/releases" || exit 1
# upload archive
response_archive=$(curl -f -X POST \
--http1.1 \
-H "PRIVATE-TOKEN: ${token}" \
-F "file=@${archivename}" \
"https://code.ungleich.ch/api/v4/projects/${project}/uploads" \
| sed "${sed_cmd}") || exit 1
# get release ID
repoid=$(curl "https://api.github.com/repos/ungleich/cdist/releases/tags/${tag}" \
| python3 -c 'import json; import sys; print(json.loads(sys.stdin.read())["id"])') \
|| exit 1
# upload archive signature
response_archive_sig=$(curl -f -X POST \
--http1.1 \
-H "PRIVATE-TOKEN: ${token}" \
-F "file=@${archivename}.asc" \
"https://code.ungleich.ch/api/v4/projects/${project}/uploads" \
| sed "${sed_cmd}") || exit 1
# make release
changelog=$("$0" changelog-changes "$1" | sed 's/^[[:space:]]*//')
release_notes=$(
printf "%s\n\n%s\n\n**Changelog**\n\n%s\n" \
"${response_archive}" "${response_archive_sig}" "${changelog}"
)
curl -f -X POST \
-H "PRIVATE-TOKEN: ${token}" \
-F "description=${release_notes}" \
"https://code.ungleich.ch/api/v4/projects/${project}/repository/tags/${tag}/release" \
# upload archive and then signature
curl -H "Authorization: token ${token}" \
-H "Accept: application/vnd.github.manifold-preview" \
-H "Content-Type: application/x-gtar" \
--data-binary @${archivename} \
"https://uploads.github.com/repos/ungleich/cdist/releases/${repoid}/assets?name=${archivename}" \
|| exit 1
curl -H "Authorization: token ${token}" \
-H "Accept: application/vnd.github.manifold-preview" \
-H "Content-Type: application/pgp-signature" \
--data-binary @${archivename}.asc \
"https://uploads.github.com/repos/ungleich/cdist/releases/${repoid}/assets?name=${archivename}.asc" \
|| exit 1
# remove generated files (archive and asc)
@ -283,30 +252,30 @@ eof
target_version=$($0 changelog-version)
target_branch=$($0 version-branch)
printf "Beginning release process for %s\n" "${target_version}"
echo "Beginning release process for $target_version"
# First check everything is sane
"$0" check-date
"$0" check-unittest
"$0" check-pycodestyle
"$0" check-shellcheck
"$0" shellcheck
# Generate version file to be included in packaging
"$0" target-version
# Ensure the git status is clean, else abort
if ! git diff-index --name-only --exit-code HEAD ; then
printf "Unclean tree, see files above, aborting.\n"
echo "Unclean tree, see files above, aborting"
exit 1
fi
# Ensure we are on the master branch
masterbranch=yes
if [ "$(git rev-parse --abbrev-ref HEAD)" != "master" ]; then
printf "Releases are happening from the master branch, aborting.\n"
echo "Releases are happening from the master branch, aborting"
printf "Enter the magic word to release anyway:"
read -r magicword
echo "Enter the magic word to release anyway"
read magicword
if [ "$magicword" = "iknowwhatido" ]; then
masterbranch=no
@ -317,7 +286,7 @@ eof
if [ "$masterbranch" = yes ]; then
# Ensure version branch exists
if ! git rev-parse --verify "refs/heads/${target_branch}" 2>/dev/null; then
if ! git rev-parse --verify refs/heads/$target_branch 2>/dev/null; then
git branch "$target_branch"
fi
@ -335,12 +304,20 @@ eof
make docs-clean
make docs
# Generate speeches (indirect check if they build)
make speeches
#############################################################
# Everything green, let's do the release
# Tag the current commit
"$0" release-git-tag
# sign git tag
printf "Enter github authentication token: "
read token
"$0" sign-git-release "${target_version}" "${token}"
# Also merge back the version branch
if [ "$masterbranch" = yes ]; then
git checkout master
@ -348,41 +325,41 @@ eof
fi
# Publish git changes
# if you want to have mirror locally then uncomment this and comment below
# git push --mirror
git push
# push also new branch and set up tracking
git push -u origin "${target_branch}"
# fi
make pub
# publish man, speeches, website
if [ "$masterbranch" = yes ]; then
make web-release-all
else
make web-release-all-no-latest
fi
# Ensure that pypi release has the right version
"$0" version
# Create and publish package for pypi
"$0" pypi-release
make pypi-release
# sign git tag
printf "Enter upstream repository authentication token: "
read -r token
"$0" sign-git-release "${target_version}" "${token}"
# Archlinux release is based on pypi
make archlinux-release
# Announce change on ML
"$0" ml-release "${target_version}"
make ml-release
cat << eof
Manual steps post release:
- cdist-web
- send mail body generated in mailinglist.tmp and inform Dmitry for deb
- linkedin
- hackernews
- reddit
- twitter
eof
;;
test)
if [ ! -f "cdist/version.py" ]
then
printf "cdist/version.py is missing, generate it first.\n"
exit 1
fi
PYTHONPATH="$(pwd -P)"
export PYTHONPATH
export PYTHONPATH="$(pwd -P)"
if [ $# -lt 1 ]; then
python3 -m cdist.test
@ -392,15 +369,7 @@ eof
;;
test-remote)
if [ ! -f "cdist/version.py" ]
then
printf "cdist/version.py is missing, generate it first.\n"
exit 1
fi
PYTHONPATH="$(pwd -P)"
export PYTHONPATH
export PYTHONPATH="$(pwd -P)"
python3 -m cdist.test.exec.remote
;;
@ -413,9 +382,9 @@ eof
printf "\\nPlease review pycodestyle report.\\n"
while true
do
printf "Continue (yes/no)?\n"
echo "Continue (yes/no)?"
any=
read -r any
read any
case "$any" in
yes)
break
@ -424,74 +393,20 @@ eof
exit 1
;;
*)
printf "Please answer with 'yes' or 'no' explicitly.\n"
echo "Please answer with 'yes' or 'no' explicitly."
;;
esac
done
;;
shellcheck-global-explorers)
find cdist/conf/explorer -type f -exec ${SHELLCHECKCMD} {} + | grep -v "${SHELLCHECK_SKIP}" || exit 0
;;
shellcheck-type-explorers)
find cdist/conf/type -type f -path "*/explorer/*" -exec ${SHELLCHECKCMD} {} + | grep -v "${SHELLCHECK_SKIP}" || exit 0
;;
shellcheck-manifests)
find cdist/conf/type -type f -name manifest -exec ${SHELLCHECKCMD} {} + | grep -v "${SHELLCHECK_SKIP}" || exit 0
;;
shellcheck-local-gencodes)
find cdist/conf/type -type f -name gencode-local -exec ${SHELLCHECKCMD} {} + | grep -v "${SHELLCHECK_SKIP}" || exit 0
;;
shellcheck-remote-gencodes)
find cdist/conf/type -type f -name gencode-remote -exec ${SHELLCHECKCMD} {} + | grep -v "${SHELLCHECK_SKIP}" || exit 0
;;
shellcheck-scripts)
${SHELLCHECKCMD} scripts/cdist-dump scripts/cdist-new-type || exit 0
;;
shellcheck-gencodes)
"$0" shellcheck-local-gencodes
"$0" shellcheck-remote-gencodes
;;
shellcheck-types)
"$0" shellcheck-type-explorers
"$0" shellcheck-manifests
"$0" shellcheck-gencodes
;;
shellcheck)
"$0" shellcheck-global-explorers
"$0" shellcheck-types
"$0" shellcheck-scripts
;;
shellcheck-type-files)
find cdist/conf/type -type f -path "*/files/*" -exec ${SHELLCHECKCMD} {} + | grep -v "${SHELLCHECK_SKIP}" || exit 0
;;
shellcheck-with-files)
"$0" shellcheck
"$0" shellcheck-type-files
;;
shellcheck-build-helper)
${SHELLCHECKCMD} ./bin/build-helper
;;
check-shellcheck)
"$0" shellcheck
make helper=${helper} WEBDIR=${WEBDIR} shellcheck
printf "\\nPlease review shellcheck report.\\n"
while true
do
printf "Continue (yes/no)?\n"
echo "Continue (yes/no)?"
any=
read -r any
read any
case "$any" in
yes)
break
@ -500,7 +415,7 @@ eof
exit 1
;;
*)
printf "Please answer with 'yes' or 'no' explicitly.\n"
echo "Please answer with 'yes' or 'no' explicitly."
;;
esac
done
@ -511,39 +426,16 @@ eof
;;
version)
printf "VERSION = \"%s\"\n" "$(git describe)" > cdist/version.py
echo "VERSION = \"$(git describe)\"" > cdist/version.py
;;
target-version)
target_version=$($0 changelog-version)
printf "VERSION = \"%s\"\n" "${target_version}" > cdist/version.py
echo "VERSION = \"${target_version}\"" > cdist/version.py
;;
clean)
make clean
# Archlinux
rm -f cdist-*.pkg.tar.xz cdist-*.tar.gz
rm -rf pkg/ src/
rm -f MANIFEST PKGBUILD
rm -rf dist/
# Signed release
rm -f cdist-*.tar.gz
rm -f cdist-*.tar.gz.asc
# Temp files
rm -f ./*.tmp
;;
distclean)
"$0" clean
rm -f cdist/version.py
;;
*)
printf "Unknown target: '%s'.\n" "${option}" >&2
usage "${basename}"
echo "Unknown helper target $@ - aborting"
exit 1
;;

bin/build-helper.freebsd (new executable file): 482 lines added

@ -0,0 +1,482 @@
#!/bin/sh
#
# 2011-2013 Nico Schottelius (nico-cdist at schottelius.org)
# 2016 Darko Poljak (darko.poljak at gmail.com)
#
# This file is part of cdist.
#
# cdist is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cdist is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with cdist. If not, see <http://www.gnu.org/licenses/>.
#
#
# This file contains the heavy lifting found usually in the Makefile
#
# vars for make
helper=$0
basedir=${0%/*}/../
# run_as is used to check how the script is called (by $0 value)
# currently supported sufixes for $0 are:
# .freebsd - run as freebsd
basename=${0##*/}
run_as=${basename#*.}
case "$run_as" in
freebsd)
to_a=cdist-configuration-management
to_d=googlegroups.com
from_a=darko.poljak
from_d=gmail.com
ml_name="Darko Poljak"
ml_sig_name="Darko"
# vars for make
WEBDIR=../vcs/www.nico.schottelius.org
;;
*)
to_a=cdist
to_d=l.schottelius.org
from_a=nico-cdist
from_d=schottelius.org
ml_name="Nico -telmich- Schottelius"
ml_sig_name="Nico"
# vars for make
WEBDIR=$$HOME/vcs/www.nico.schottelius.org
;;
esac
# Change to checkout directory
cd "$basedir"
version=$(git describe)
option=$1; shift
case "$option" in
print-make-vars)
printf "helper: ${helper}\n"
printf "WEBDIR: ${WEBDIR}\n"
;;
print-runas)
printf "run_as: $run_as\n"
;;
changelog-changes)
if [ "$#" -eq 1 ]; then
start=$1
else
start="[[:digit:]]"
fi
end="[[:digit:]]"
awk -F: "BEGIN { start=0 }
{
if(start == 0) {
if (\$0 ~ /^$start/) {
start = 1
}
} else {
if (\$0 ~ /^$end/) {
exit
} else {
print \$0
}
}
}" "$basedir/docs/changelog"
;;
changelog-version)
# get version from changelog
grep '^[[:digit:]]' "$basedir/docs/changelog" | head -n1 | sed 's/:.*//'
;;
check-date)
# verify date in changelog is today
date_today="$(date +%Y-%m-%d)"
date_changelog=$(grep '^[[:digit:]]' "$basedir/docs/changelog" | head -n1 | sed 's/.*: //')
if [ "$date_today" != "$date_changelog" ]; then
echo "Date in changelog is not today"
echo "Changelog: $date_changelog"
exit 1
fi
;;
check-unittest)
"$0" test
;;
blog)
version=$1; shift
blogfile=$1; shift
dir=${blogfile%/*}
file=${blogfile##*/}
cat << eof > "$blogfile"
[[!meta title="Cdist $version released"]]
Here's a short overview about the changes found in version ${version}:
eof
$0 changelog-changes "$version" >> "$blogfile"
cat << eof >> "$blogfile"
For more information visit the [[cdist homepage|software/cdist]].
[[!tag cdist config unix]]
eof
cd "$dir"
git add "$file"
# Allow git commit to fail if there are no changes
git commit -m "cdist blog update: $version" "$blogfile" || true
;;
ml-release)
if [ $# -ne 1 ]; then
echo "$0 ml-release version" >&2
exit 1
fi
version=$1; shift
to=${to_a}@${to_d}
from=${from_a}@${from_d}
(
cat << eof
From: ${ml_name} <$from>
To: cdist mailing list <$to>
Subject: cdist $version released
Hello .*,
cdist $version has been released with the following changes:
eof
"$0" changelog-changes "$version"
cat << eof
Cheers,
${ml_sig_name}
--
Automatisation at its best level. With cdist.
eof
) | /usr/sbin/sendmail -f "$from" "$to"
;;
release-git-tag)
target_version=$($0 changelog-version)
if git rev-parse --verify refs/tags/$target_version 2>/dev/null; then
echo "Tag for $target_version exists, aborting"
exit 1
fi
printf "Enter tag description for ${target_version}: "
read tagmessage
# setup for signed tags:
# gpg --fulL-gen-key
# gpg --list-secret-keys --keyid-format LONG
# git config --local user.signingkey <id>
# for exporting pub key:
# gpg --armor --export <id> > pubkey.asc
# gpg --output pubkey.gpg --export <id>
# show tag with signature
# git show <tag>
# verify tag signature
# git tag -v <tag>
#
# gpg verify signature
# gpg --verify <asc-file> <file>
# gpg --no-default-keyring --keyring <pubkey.gpg> --verify <asc-file> <file>
# Ensure gpg-agent is running.
export GPG_TTY=$(tty)
gpg-agent
git tag -s "$target_version" -m "$tagmessage"
git push --tags
;;
sign-git-release)
if [ $# -lt 2 ]
then
printf "usage: $0 sign-git-release TAG TOKEN [ARCHIVE]\n"
printf " if ARCHIVE is not specified then it is created\n"
exit 1
fi
tag="$1"
if ! git rev-parse -q --verify "${tag}" >/dev/null 2>&1
then
printf "Tag \"${tag}\" not found.\n"
exit 1
fi
token="$2"
if [ $# -gt 2 ]
then
archivename="$3"
else
archivename="cdist-${tag}.tar"
git archive --prefix="cdist-${tag}/" -o "${archivename}" "${tag}" \
|| exit 1
# make sure target version is generated
"$0" target-version
tar -x -f "${archivename}" || exit 1
cp cdist/version.py "cdist-${tag}/cdist/version.py" || exit 1
tar -c -f "${archivename}" "cdist-${tag}/" || exit 1
rm -r -f "cdist-${tag}/"
gzip "${archivename}" || exit 1
archivename="${archivename}.gz"
fi
gpg --armor --detach-sign "${archivename}" || exit 1
# make github release
curl -H "Authorization: token ${token}" \
--request POST \
--data "{ \"tag_name\":\"${tag}\", \
\"target_commitish\":\"master\", \
\"name\": \"${tag}\", \
\"body\":\"${tag}\", \
\"draft\":false, \
\"prerelease\": false}" \
"https://api.github.com/repos/ungleich/cdist/releases" || exit 1
# get release ID
repoid=$(curl "https://api.github.com/repos/ungleich/cdist/releases/tags/${tag}" \
| python3 -c 'import json; import sys; print(json.loads(sys.stdin.read())["id"])') \
|| exit 1
# upload archive and then signature
curl -H "Authorization: token ${token}" \
-H "Accept: application/vnd.github.manifold-preview" \
-H "Content-Type: application/x-gtar" \
--data-binary @${archivename} \
"https://uploads.github.com/repos/ungleich/cdist/releases/${repoid}/assets?name=${archivename}" \
|| exit 1
curl -H "Authorization: token ${token}" \
-H "Accept: application/vnd.github.manifold-preview" \
-H "Content-Type: application/pgp-signature" \
--data-binary @${archivename}.asc \
"https://uploads.github.com/repos/ungleich/cdist/releases/${repoid}/assets?name=${archivename}.asc" \
|| exit 1
# remove generated files (archive and asc)
if [ $# -eq 2 ]
then
rm -f "${archivename}"
fi
rm -f "${archivename}.asc"
;;
release)
set -e
target_version=$($0 changelog-version)
target_branch=$($0 version-branch)
echo "Beginning release process for $target_version"
# First check everything is sane
"$0" check-date
"$0" check-unittest
"$0" check-pycodestyle
"$0" shellcheck
# Generate version file to be included in packaging
"$0" target-version
# Ensure the git status is clean, else abort
if ! git diff-index --name-only --exit-code HEAD ; then
echo "Unclean tree, see files above, aborting"
exit 1
fi
# Ensure we are on the master branch
masterbranch=yes
if [ "$(git rev-parse --abbrev-ref HEAD)" != "master" ]; then
echo "Releases are happening from the master branch, aborting"
echo "Enter the magic word to release anyway"
read magicword
if [ "$magicword" = "iknowwhatido" ]; then
masterbranch=no
else
exit 1
fi
fi
if [ "$masterbranch" = yes ]; then
# Ensure version branch exists
if ! git rev-parse --verify refs/heads/$target_branch 2>/dev/null; then
git branch "$target_branch"
fi
# Merge master branch into version branch
git checkout "$target_branch"
git merge master
fi
# Verify that after the merge everything works
"$0" check-date
"$0" check-unittest
# Generate documentation (man and html)
# First, clean old generated docs
make helper=${helper} WEBDIR=${WEBDIR} docs-clean
make helper=${helper} WEBDIR=${WEBDIR} docs
# Generate speeches (indirect check if they build)
make helper=${helper} WEBDIR=${WEBDIR} speeches
#############################################################
# Everything green, let's do the release
# Tag the current commit
"$0" release-git-tag
# sign git tag
printf "Enter github authentication token: "
read token
"$0" sign-git-release "${target_version}" "${token}"
# Also merge back the version branch
if [ "$masterbranch" = yes ]; then
git checkout master
git merge "$target_branch"
fi
# Publish git changes
case "$run_as" in
freebsd)
# if we are not Nico :) then just push, no mirror
git push
# push also new branch and set up tracking
git push -u origin "${target_branch}"
;;
*)
make helper=${helper} WEBDIR=${WEBDIR} pub
;;
esac
# Ensure that pypi release has the right version
"$0" version
# Create and publish package for pypi
make helper=${helper} WEBDIR=${WEBDIR} pypi-release
# publish man, speeches, website
if [ "$masterbranch" = yes ]; then
make helper=${helper} WEBDIR=${WEBDIR} web-release-all
else
make helper=${helper} WEBDIR=${WEBDIR} web-release-all-no-latest
fi
case "$run_as" in
freebsd)
;;
*)
# Archlinux release is based on pypi
make archlinux-release
;;
esac
# Announce change on ML
make helper=${helper} WEBDIR=${WEBDIR} ml-release
;;
test)
export PYTHONPATH="$(pwd -P)"
if [ $# -lt 1 ]; then
python3 -m cdist.test
else
python3 -m unittest "$@"
fi
;;
test-remote)
export PYTHONPATH="$(pwd -P)"
python3 -m cdist.test.exec.remote
;;
pycodestyle|pep8)
pycodestyle "${basedir}" "${basedir}/scripts/cdist" | less
;;
check-pycodestyle)
"$0" pycodestyle
printf "\\nPlease review pycodestyle report.\\n"
while true
do
echo "Continue (yes/no)?"
any=
read any
case "$any" in
yes)
break
;;
no)
exit 1
;;
*)
echo "Please answer with 'yes' or 'no' explicitly."
;;
esac
done
;;
shellcheck)
make helper=${helper} WEBDIR=${WEBDIR} shellcheck
printf "\\nPlease review shellcheck report.\\n"
while true
do
echo "Continue (yes/no)?"
any=
read any
case "$any" in
yes)
break
;;
no)
exit 1
;;
*)
echo "Please answer with 'yes' or 'no' explicitly."
;;
esac
done
;;
version-branch)
"$0" changelog-version | cut -d. -f '1,2'
;;
version)
echo "VERSION = \"$(git describe)\"" > cdist/version.py
;;
target-version)
target_version=$($0 changelog-version)
echo "VERSION = \"${target_version}\"" > cdist/version.py
;;
*)
echo "Unknown helper target $@ - aborting"
exit 1
;;
esac
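
As the comments at the top of this new helper describe, it selects its configuration from the suffix of its own filename; a short invocation sketch based on the dispatch above:

    # run_as is derived from the part of $0 after the last dot
    ./bin/build-helper.freebsd print-runas         # prints: run_as: freebsd
    ./bin/build-helper.freebsd changelog-version   # latest version from docs/changelog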

View file

@ -5,23 +5,21 @@ import logging
import collections
import functools
import cdist.configuration
import cdist.preos
# set of beta sub-commands
BETA_COMMANDS = set(('install', 'inventory', ))
# set of beta arguments for sub-commands
BETA_ARGS = {
'config': set(('tag', 'all_tagged_hosts', 'use_archiving', )),
'config': set(('jobs', 'tag', 'all_tagged_hosts', 'use_archiving', )),
}
EPILOG = "Get cdist at https://code.ungleich.ch/ungleich-public/cdist"
EPILOG = "Get cdist at http://www.nico.schottelius.org/software/cdist/"
# Parser others can reuse
parser = None
_verbosity_level_off = -2
_verbosity_level = {
None: logging.WARNING,
_verbosity_level_off: logging.OFF,
-1: logging.ERROR,
0: logging.WARNING,
@ -193,7 +191,8 @@ def get_parsers():
name="positive int"),
help=('Operate in parallel in specified maximum number of jobs. '
'Global explorers, object prepare and object run are '
'supported. Without argument CPU count is used by default. '),
'supported. Without argument CPU count is used by default. '
'Currently in beta.'),
action='store', dest='jobs',
const=multiprocessing.cpu_count())
parser['config_main'].add_argument(
@ -424,9 +423,6 @@ def get_parsers():
parser['inventory'].set_defaults(
func=cdist.inventory.Inventory.commandline)
# PreOs
parser['preos'] = parser['sub'].add_parser('preos', add_help=False)
# Shell
parser['shell'] = parser['sub'].add_parser(
'shell', parents=[parser['loglevel']])

View file

@ -29,7 +29,7 @@ case "$uname_s" in
Linux)
(pgrep -P0 -l | awk '/^1[ \t]/ {print $2;}') || true
;;
FreeBSD|OpenBSD)
FreeBSD)
ps -o comm= -p 1 || true
;;
*)

View file

@ -18,11 +18,13 @@
# along with cdist. If not, see <http://www.gnu.org/licenses/>.
#
if command -v ip >/dev/null
if command -v ip > /dev/null
then
ip -o link show | sed -n 's/^[0-9]\+: \(.\+\): <.*/\1/p'
elif command -v ifconfig >/dev/null
ip -o link show | sed -n 's/^[0-9]\+: \(.\+\): <.*/\1/p'
elif command -v ifconfig > /dev/null
then
ifconfig -a | sed -n -E 's/^(.*)(:[[:space:]]*flags=|Link encap).*/\1/p'
fi \
| sort -u
ifconfig -a \
| sed -n -E 's/^(.*)(:[[:space:]]*flags=|Link encap).*/\1/p' \
| sort -u
fi

View file

@ -145,7 +145,7 @@ esac
if [ -f /etc/os-release ]; then
# already lowercase, according to:
# https://www.freedesktop.org/software/systemd/man/os-release.html
awk -F= '/^ID=/ { if ($2 ~ /^'"'"'(.*)'"'"'$/ || $2 ~ /^"(.*)"$/) { print substr($2, 2, length($2) - 2) } else { print $2 } }' /etc/os-release
awk -F= '/^ID=/ {print $2;}' /etc/os-release
exit 0
fi

View file

@ -18,22 +18,30 @@
# along with cdist. If not, see <http://www.gnu.org/licenses/>.
#
# TODO check if filesystem has ACL turned on etc
[ ! -e "/$__object_id" ] && exit 0
if [ -f "$__object/parameter/acl" ]
then
grep -E '^(default:)?(user|group):' "$__object/parameter/acl" \
| while read -r acl
for parameter in user group
do
if [ ! -f "$__object/parameter/$parameter" ]
then
continue
fi
while read -r acl
do
param="$( echo "$acl" | awk -F: '{print $(NF-2)}' )"
check="$( echo "$acl" | awk -F: '{print $(NF-1)}' )"
check="$( echo "$acl" | awk -F: '{print $1}' )"
[ "$param" = 'user' ] && db=passwd || db="$param"
if ! getent "$db" "$check" > /dev/null
if [ "$parameter" = 'user' ]
then
echo "missing $param '$check'" >&2
exit 1
getent_db=passwd
else
getent_db="$parameter"
fi
done
fi
if ! getent "$getent_db" "$check" > /dev/null
then
echo "missing $parameter '$check'"
fi
done \
< "$__object/parameter/$parameter"
done

View file

@ -20,65 +20,59 @@
file_is="$( cat "$__object/explorer/file_is" )"
[ "$file_is" = 'missing' ] && [ -z "$__cdist_dry_run" ] && exit 0
[ "$file_is" = 'missing' ] && exit 0
os="$( cat "$__global/explorer/os" )"
missing_users_groups="$( cat "$__object/explorer/missing_users_groups" )"
acl_path="/$__object_id"
acl_is="$( cat "$__object/explorer/acl_is" )"
if [ -f "$__object/parameter/acl" ]
if [ -n "$missing_users_groups" ]
then
acl_should="$( cat "$__object/parameter/acl" )"
elif
[ -f "$__object/parameter/user" ] \
|| [ -f "$__object/parameter/group" ] \
|| [ -f "$__object/parameter/mask" ] \
|| [ -f "$__object/parameter/other" ]
then
acl_should="$( for param in user group mask other
do
[ ! -f "$__object/parameter/$param" ] && continue
echo "$param" | grep -Eq 'mask|other' && sep=:: || sep=:
echo "$param$sep$( cat "$__object/parameter/$param" )"
done )"
else
echo 'no parameters set' >&2
echo "$missing_users_groups" >&2
exit 1
fi
if [ -f "$__object/parameter/default" ]
os="$( cat "$__global/explorer/os" )"
acl_is="$( cat "$__object/explorer/acl_is" )"
acl_path="/$__object_id"
if [ -f "$__object/parameter/default" ] && [ "$file_is" = 'directory' ]
then
acl_should="$( echo "$acl_should" \
| sed 's/^default://' \
| sort -u \
| sed 's/\(.*\)/default:\1\n\1/' )"
set_default=1
else
set_default=0
fi
if [ "$file_is" = 'regular' ] \
&& echo "$acl_should" | grep -Eq '^default:'
then
# only directories can have default ACLs,
# but instead of error,
# let's just remove default entries
acl_should="$( echo "$acl_should" | grep -Ev '^default:' )"
fi
acl_should="$( for parameter in user group mask other
do
if [ ! -f "$__object/parameter/$parameter" ]
then
continue
fi
if echo "$acl_should" | awk -F: '{ print $NF }' | grep -Fq 'X'
then
[ "$file_is" = 'directory' ] && rep=x || rep=-
while read -r acl
do
if echo "$acl" | awk -F: '{ print $NF }' | grep -Fq 'X'
then
[ "$file_is" = 'directory' ] && rep=x || rep=-
acl_should="$( echo "$acl_should" | sed "s/\\(.*\\)X/\\1$rep/" )"
fi
acl="$( echo "$acl" | sed "s/\(.*\)X/\1$rep/" )"
fi
echo "$parameter" | grep -Eq '(mask|other)' && sep=:: || sep=:
echo "$parameter$sep$acl"
[ "$set_default" = '1' ] && echo "default:$parameter$sep$acl"
done \
< "$__object/parameter/$parameter"
done )"
setfacl_exec='setfacl'
if [ -f "$__object/parameter/recursive" ]
then
if echo "$os" | grep -Fq 'freebsd'
if echo "$os" | grep -Eq 'macosx|freebsd'
then
echo "$os setfacl do not support recursive operations" >&2
else
@ -88,36 +82,44 @@ fi
if [ -f "$__object/parameter/remove" ]
then
echo "$acl_is" | while read -r acl
do
# skip wanted ACL entries which already exist
# and skip mask and other entries, because we
# can't actually remove them, but only change.
if echo "$acl_should" | grep -Eq "^$acl" \
|| echo "$acl" | grep -Eq '^(default:)?(mask|other)'
then continue
fi
if echo "$os" | grep -Fq 'solaris'
then
# Solaris setfacl behaves differently.
# We will not support Solaris for now, because no way to test it.
# But adding support should be easy (use -s instead of -m on modify).
echo "$os setfacl do not support -x flag for ACL remove" >&2
else
echo "$acl_is" | while read -r acl
do
# Skip wanted ACL entries which already exist
# and skip mask and other entries, because we
# can't actually remove them, but only change.
if echo "$acl_should" | grep -Eq "^$acl" \
|| echo "$acl" | grep -Eq '^(default:)?(mask|other)'
then continue
fi
if echo "$os" | grep -Fq 'freebsd'
then
remove="$acl"
else
remove="$( echo "$acl" | sed 's/:...$//' )"
fi
if echo "$os" | grep -Eq 'macosx|freebsd'
then
remove="$acl"
else
remove="$( echo "$acl" | sed 's/:...$//' )"
fi
echo "$setfacl_exec -x \"$remove\" \"$acl_path\""
echo "removed '$remove'" >> "$__messages_out"
done
echo "$setfacl_exec -x \"$remove\" \"$acl_path\""
echo "removed '$remove'" >> "$__messages_out"
done
fi
fi
for acl in $acl_should
do
if ! echo "$acl_is" | grep -Eq "^$acl"
then
if echo "$os" | grep -Fq 'freebsd' \
if echo "$os" | grep -Eq 'macosx|freebsd' \
&& echo "$acl" | grep -Eq '^default:'
then
echo "setting default ACL in $os is currently not supported" >&2
echo "setting default ACL in $os is currently not supported. sorry :(" >&2
else
echo "$setfacl_exec -m \"$acl\" \"$acl_path\""
echo "added '$acl'" >> "$__messages_out"

View file

@ -8,36 +8,46 @@ cdist-type__acl - Set ACL entries
DESCRIPTION
-----------
Fully supported and tested on Linux (ext4 filesystem), partial support for FreeBSD.
ACL must be defined as 3-symbol combination, using ``r``, ``w``, ``x`` and ``-``.
Fully supported on Linux (tested on Debian and CentOS).
Partial support for FreeBSD, OSX and Solaris.
OpenBSD and NetBSD support is not possible.
See ``setfacl`` and ``acl`` manpages for more details.
REQUIRED MULTIPLE PARAMETERS
OPTIONAL MULTIPLE PARAMETERS
----------------------------
acl
Set ACL entry following ``getfacl`` output syntax.
user
Add user ACL entry.
group
Add group ACL entry.
OPTIONAL PARAMETERS
-------------------
mask
Add mask ACL entry.
other
Add other ACL entry.
BOOLEAN PARAMETERS
------------------
default
Set all ACL entries as default too.
Only directories can have default ACLs.
Setting default ACL in FreeBSD is currently not supported.
recursive
Make ``setfacl`` recursive (Linux only), but not ``getfacl`` in explorer.
default
Add default ACL entries (FreeBSD not supported).
remove
Remove undefined ACL entries.
``mask`` and ``other`` entries can't be removed, but only changed.
DEPRECATED PARAMETERS
---------------------
Parameters ``user``, ``group``, ``mask`` and ``other`` are deprecated and they
will be removed in future versions. Please use ``acl`` parameter instead.
Remove undefined ACL entries (Solaris not supported).
ACL entries for ``mask`` and ``other`` can't be removed.
EXAMPLES
@ -46,30 +56,15 @@ EXAMPLES
.. code-block:: sh
__acl /srv/project \
--default \
--recursive \
--remove \
--acl user:alice:rwx \
--acl user:bob:r-x \
--acl group:project-group:rwx \
--acl group:some-other-group:r-x \
--acl mask::r-x \
--acl other::r-x
# give Alice read-only access to subdir,
# but don't allow her to see parent content.
__acl /srv/project2 \
--remove \
--acl default:group:secret-project:rwx \
--acl group:secret-project:rwx \
--acl user:alice:--x
__acl /srv/project2/subdir \
--default \
--remove \
--acl group:secret-project:rwx \
--acl user:alice:r-x
--user alice:rwx \
--user bob:r-x \
--group project-group:rwx \
--group some-other-group:r-x \
--mask r-x \
--other r-x
AUTHORS

View file

@ -1 +0,0 @@
see manual for details

View file

@ -1 +0,0 @@
see manual for details

View file

@ -1 +0,0 @@
see manual for details

View file

@ -1 +0,0 @@
see manual for details

View file

@ -1,3 +1,2 @@
acl
user
group

View file

@ -27,18 +27,6 @@ else
keyid="$__object_id"
fi
keydir="$(cat "$__object/parameter/keydir")"
keyfile="$keydir/$__object_id.gpg"
if [ -d "$keydir" ]
then
if [ -f "$keyfile" ]
then echo present
else echo absent
fi
else
# fallback to deprecated apt-key
apt-key export "$keyid" | head -n 1 | grep -Fqe "BEGIN PGP PUBLIC KEY BLOCK" \
&& echo present \
|| echo absent
fi
apt-key export "$keyid" | head -n 1 | grep -Fqe "BEGIN PGP PUBLIC KEY BLOCK" \
&& echo present \
|| echo absent

View file

@ -31,84 +31,12 @@ if [ "$state_should" = "$state_is" ]; then
exit 0
fi
keydir="$(cat "$__object/parameter/keydir")"
keyfile="$keydir/$__object_id.gpg"
case "$state_should" in
present)
keyserver="$(cat "$__object/parameter/keyserver")"
if [ -f "$__object/parameter/uri" ]; then
uri="$(cat "$__object/parameter/uri")"
if [ -d "$keydir" ]; then
cat << EOF
curl -s -L \\
-o "$keyfile" \\
"$uri"
key="\$( cat "$keyfile" )"
if echo "\$key" | grep -Fq 'BEGIN PGP PUBLIC KEY BLOCK'
then
echo "\$key" | gpg --dearmor > "$keyfile"
fi
EOF
else
# fallback to deprecated apt-key
echo "curl -s -L '$uri' | apt-key add -"
fi
elif [ -d "$keydir" ]; then
tmp='/tmp/cdist_apt_key_tmp'
# we need to kill gpg after 30 seconds, because gpg
# can get stuck if keyserver is not responding.
# exporting env var and not exit 1,
# because we need to clean up and kill dirmngr.
cat << EOF
mkdir -m 700 -p "$tmp"
if timeout 30s \\
gpg --homedir "$tmp" \\
--keyserver "$keyserver" \\
--recv-keys "$keyid"
then
gpg --homedir "$tmp" \\
--export "$keyid" \\
> "$keyfile"
else
export GPG_GOT_STUCK=1
fi
GNUPGHOME="$tmp" gpgconf --kill dirmngr
rm -rf "$tmp"
if [ -n "\$GPG_GOT_STUCK" ]
then
echo "GPG GOT STUCK - no response from keyserver after 30 seconds" >&2
exit 1
fi
EOF
else
# fallback to deprecated apt-key
echo "apt-key adv --keyserver \"$keyserver\" --recv-keys \"$keyid\""
fi
echo "added '$keyid'" >> "$__messages_out"
echo "apt-key adv --keyserver \"$keyserver\" --recv-keys \"$keyid\""
;;
absent)
if [ -f "$keyfile" ]; then
echo "rm '$keyfile'"
else
# fallback to deprecated apt-key
echo "apt-key del \"$keyid\""
fi
echo "removed '$keyid'" >> "$__messages_out"
echo "apt-key del \"$keyid\""
;;
esac

View file

@ -28,12 +28,6 @@ keyserver
the keyserver from which to fetch the key. If omitted the default set
in ./parameter/default/keyserver is used.
keydir
key save location, defaults to ``/etc/apt/trusted.pgp.d``
uri
the URI from which to download the key
EXAMPLES
--------
@ -53,20 +47,15 @@ EXAMPLES
# same thing with other keyserver
__apt_key UbuntuArchiveKey --keyid 437D05B5 --keyserver keyserver.ubuntu.com
# download key from the internet
__apt_key rabbitmq \
--uri http://www.rabbitmq.com/rabbitmq-signing-key-public.asc
AUTHORS
-------
Steven Armstrong <steven-cdist--@--armstrong.cc>
Ander Punnar <ander-at-kvlt-dot-ee>
COPYING
-------
Copyright \(C) 2011-2019 Steven Armstrong and Ander Punnar. You can
redistribute it and/or modify it under the terms of the GNU General Public
License as published by the Free Software Foundation, either version 3 of the
Copyright \(C) 2011-2014 Steven Armstrong. You can redistribute it
and/or modify it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.

View file

@ -1,8 +0,0 @@
#!/bin/sh -e
__package gnupg
if [ -f "$__object/parameter/uri" ]
then __package curl
else __package dirmngr
fi

View file

@ -1 +0,0 @@
/etc/apt/trusted.gpg.d

View file

@ -1,5 +1,3 @@
state
keyid
keyserver
keydir
uri

View file

@ -18,11 +18,6 @@
# along with cdist. If not, see <http://www.gnu.org/licenses/>.
#
# quote function from http://www.etalabs.net/sh_tricks.html
quote() {
printf '%s\n' "$1" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/'/"
}
file="$(cat "$__object/parameter/file" 2>/dev/null || echo "/$__object_id")"
state_should=$(cat "$__object/parameter/state")
prefix=$(cat "$__object/parameter/prefix" 2>/dev/null || echo "#cdist:__block/$__object_id")
@ -51,7 +46,7 @@ tmpfile=\$(mktemp ${file}.cdist.XXXXXXXXXX)
if [ -f "$file" ]; then
cp -p "$file" "\$tmpfile"
fi
awk -v prefix=^$(quote "$prefix")\$ -v suffix=^$(quote "$suffix")\$ '
awk -v prefix="^$prefix\$" -v suffix="^$suffix\$" '
{
if (match(\$0,prefix)) {
triggered=1

View file

@ -30,7 +30,7 @@ username
source
Select the source from which to clone cdist from.
Defaults to "git@code.ungleich.ch:ungleich-public/cdist.git".
Defaults to "git://github.com/ungleich/cdist.git".
branch
@ -47,7 +47,7 @@ EXAMPLES
__cdist /home/cdist/cdist
# Use alternative source
__cdist --source "git@code.ungleich.ch:ungleich-public/cdist.git" /home/cdist/cdist
__cdist --source "git://github.com/ungleich/cdist" /home/cdist/cdist
AUTHORS

View file

@ -1 +1 @@
git@code.ungleich.ch:ungleich-public/cdist.git
git://github.com/ungleich/cdist.git

View file

@ -1 +0,0 @@
886614099 103959898 consul

View file

@ -1 +0,0 @@
https://releases.hashicorp.com/consul/1.5.0/consul_1.5.0_linux_amd64.zip

View file

@ -42,7 +42,7 @@ source_file_name="${source##*/}"
cksum_should=$(cut -d' ' -f1,2 "$version_dir/cksum")
cat << eof
tmpdir=\$(mktemp -d -p /tmp "${__type##*/}.XXXXXXXXXX")
tmpdir=\$(mktemp -d --tmpdir="/tmp" "${__type##*/}.XXXXXXXXXX")
curl -s -L "$source" > "\$tmpdir/$source_file_name"
unzip -p "\$tmpdir/$source_file_name" > "${destination}.tmp"
rm -rf "\$tmpdir"

View file

@ -24,7 +24,7 @@
os=$(cat "$__global/explorer/os")
case "$os" in
alpine|scientific|centos|redhat|ubuntu|debian|devuan|archlinux|gentoo)
scientific|centos|redhat|ubuntu|debian|devuan|archlinux|gentoo)
# any linux should work
:
;;
@ -47,7 +47,6 @@ fi
if [ -f "$__object/parameter/direct" ]; then
__package unzip
__package curl
else
__staged_file /usr/local/bin/consul \
--source "$(cat "$version_dir/source")" \

View file

@ -1,38 +0,0 @@
#!/sbin/openrc-run
# 2019 Nico Schottelius (nico-cdist at schottelius.org)
description="consul agent"
pidfile="${CONSUL_PIDFILE:-"/var/run/$RC_SVCNAME/pidfile"}"
command="${CONSUL_BINARY:-"/usr/local/bin/consul"}"
checkconfig() {
if [ ! -d /var/run/consul ] ; then
mkdir -p /var/run/consul || return 1
chown consul:consul /var/run/$NAME || return 1
chmod 2770 /var/run/$NAME || return 1
fi
}
start() {
need net
start-stop-daemon --start --quiet --oknodo \
--pidfile "$pidfile" --background \
--exec $command -- agent -pid-file="$pidfile" -config-dir /etc/consul/conf.d
}
start_pre() {
checkconfig
}
stop() {
if [ "${RC_CMD}" = "restart" ] ; then
checkconfig || return 1
fi
ebegin "Stopping $RC_SVCNAME"
start-stop-daemon --stop --exec "$command" \
--pidfile "$pidfile" --quiet
eend $?
}

View file

@ -1,7 +1,7 @@
#!/bin/sh -e
#
# 2015 Steven Armstrong (steven-cdist at armstrong.cc)
# 2015-2019 Nico Schottelius (nico-cdist at schottelius.org)
# 2015 Nico Schottelius (nico-cdist at schottelius.org)
#
# This file is part of cdist.
#
@ -23,7 +23,7 @@
os=$(cat "$__global/explorer/os")
case "$os" in
alpine|scientific|centos|debian|devuan|redhat|ubuntu)
scientific|centos|debian|devuan|redhat|ubuntu)
# whitelist safeguard
:
;;
@ -181,25 +181,22 @@ init_upstart()
# Install init script to start on boot
case "$os" in
alpine|devuan)
init_sysvinit debian
;;
centos|redhat)
os_version="$(sed 's/[^0-9.]//g' "$__global/explorer/os_version")"
major_version="${os_version%%.*}"
case "$major_version" in
[456])
init_sysvinit redhat
;;
7)
init_systemd
;;
*)
echo "Unsupported CentOS/Redhat version: $os_version" >&2
exit 1
;;
esac
;;
centos|redhat)
os_version="$(sed 's/[^0-9.]//g' "$__global/explorer/os_version")"
major_version="${os_version%%.*}"
case "$major_version" in
[456])
init_sysvinit redhat
;;
7)
init_systemd
;;
*)
echo "Unsupported CentOS/Redhat version: $os_version" >&2
exit 1
;;
esac
;;
debian)
os_version=$(cat "$__global/explorer/os_version")
@ -217,9 +214,13 @@ case "$os" in
exit 1
;;
esac
;;
;;
devuan)
init_sysvinit debian
;;
ubuntu)
init_upstart
;;
;;
esac

View file

@ -64,43 +64,6 @@ case "$os" in
require="__apt_source/docker" __package docker-ce --state "${state}"
fi
;;
devuan)
os_version="$(cat "$__global/explorer/os_version")"
case "$os_version" in
ascii)
distribution="stretch"
;;
jessie)
distribution="jessie"
;;
*)
echo "Your devuan release ($os_version) is currently not supported by this type (${__type##*/}).">&2
echo "Please contribute an implementation for it if you can." >&2
exit 1
;;
esac
if [ "${state}" = "present" ]; then
__package apt-transport-https
__package ca-certificates
__package gnupg2
fi
__apt_key_uri docker --name "Docker Release (CE deb) <docker@docker.com>" \
--uri "https://download.docker.com/linux/${os}/gpg" --state "${state}"
require="__apt_key_uri/docker" __apt_source docker \
--uri "https://download.docker.com/linux/${os}" \
--distribution "${distribution}" \
--state "${state}" \
--component "stable"
if [ "$version" != "latest" ]; then
require="__apt_source/docker" __package docker-ce --version "${version}" --state "${state}"
else
require="__apt_source/docker" __package docker-ce --state "${state}"
fi
;;
*)
echo "Your operating system ($os) is currently not supported by this type (${__type##*/})." >&2
echo "Please contribute an implementation for it if you can." >&2

View file

@ -18,4 +18,4 @@
# along with cdist. If not, see <http://www.gnu.org/licenses/>.
#
docker info 2>/dev/null | grep '^ *Swarm: ' | awk '{print $2}'
docker info 2>/dev/null | grep "^Swarm: " | cut -d " " -f 2-

View file

@ -19,33 +19,32 @@
#
#
state_is=$(cat "$__object/explorer/state")
owner_is=$(cat "$__object/explorer/owner")
group_is=$(cat "$__object/explorer/group")
state_is="$(cat "$__object/explorer/state")"
owner_is="$(cat "$__object/explorer/owner")"
group_is="$(cat "$__object/explorer/group")"
state_should=$(cat "$__object/parameter/state")
state_should="$(cat "$__object/parameter/state")"
branch=$(cat "$__object/parameter/branch")
branch="$(cat "$__object/parameter/branch")"
source=$(cat "$__object/parameter/source")
source="$(cat "$__object/parameter/source")"
destination="/$__object_id"
owner=$(cat "$__object/parameter/owner")
group=$(cat "$__object/parameter/group")
mode=$(cat "$__object/parameter/mode")
owner="$(cat "$__object/parameter/owner")"
group="$(cat "$__object/parameter/group")"
mode="$(cat "$__object/parameter/mode")"
[ -f "$__object/parameter/recursive" ] && recursive='--recursive' || recursive=''
[ "$state_should" = "$state_is" ] \
&& [ "$owner" = "$owner_is" ] \
&& [ "$group" = "$group_is" ] \
&& [ -n "$mode" ] && exit 0
[ "$state_should" = "$state_is" ] && \
[ "$owner" = "$owner_is" ] && \
[ "$group" = "$group_is" ] && \
[ -n "$mode" ] && exit 0
case $state_should in
present)
if [ "$state_should" != "$state_is" ]; then
echo git clone --quiet "$recursive" --branch "$branch" "$source" "$destination"
echo git clone --quiet --branch "$branch" "$source" "$destination"
fi
if { [ -n "$owner" ] && [ "$owner_is" != "$owner" ]; } || \
{ [ -n "$group" ] && [ "$group_is" != "$group" ]; }; then
@ -55,9 +54,8 @@ case $state_should in
echo chmod -R "$mode" "$destination"
fi
;;
# Handled in manifest
absent)
# Handled in manifest
;;
*)

View file

@ -35,8 +35,6 @@ mode
owner
User to chown to.
recursive
Passes the --recursive flag to git when cloning the repository.
EXAMPLES
--------
@ -46,7 +44,7 @@ EXAMPLES
__git /home/services/dokuwiki --source git://github.com/splitbrain/dokuwiki.git
# Checkout cdist, stay on branch 2.1
__git /home/nico/cdist --source git@code.ungleich.ch:ungleich-public/cdist.git --branch 2.1
__git /home/nico/cdist --source git://github.com/ungleich/cdist.git --branch 2.1
AUTHORS

View file

@ -1 +0,0 @@
recursive

View file

@ -8,12 +8,10 @@ case $os in
debian|devuan)
case $os_version in
8*|jessie)
# Differntation not needed anymore
apt_source_distribution=stable
apt_source_distribution=jessie
;;
9*|ascii/ceres|ascii)
# Differntation not needed anymore
apt_source_distribution=stable
apt_source_distribution=stretch
;;
*)
echo "Don't know how to install Grafana on $os $os_version. Send us a pull request!" >&2
@ -23,10 +21,10 @@ case $os in
__apt_key_uri grafana \
--name 'Grafana Release Signing Key' \
--uri https://packages.grafana.com/gpg.key
--uri https://packagecloud.io/gpg.key
require="$require __apt_key_uri/grafana" __apt_source grafana \
--uri https://packages.grafana.com/oss/deb \
--uri https://packagecloud.io/grafana/stable/debian/ \
--distribution $apt_source_distribution \
--component main

View file

@ -1,7 +1,6 @@
#!/bin/sh
#
# 2011-2015 Steven Armstrong (steven-cdist at armstrong.cc)
# 2019 Dennis Camera (dennis.camera at ssrq-sds-fds.ch)
#
# This file is part of cdist.
#
@ -22,21 +21,7 @@
# Get an existing groups group entry.
#
not_supported() {
echo "Your operating system ($("$__explorer/os")) is currently not supported." >&2
echo "Cannot extract group information." >&2
echo "Please contribute an implementation for it if you can." >&2
exit 1
}
name=$__object_id
if command -v getent >/dev/null
then
getent group "$name" || true
elif [ -f /etc/group ]
then
grep "^${name}:" /etc/group || true
else
not_supported
fi
getent group "$name" || true

View file

@ -1,7 +1,6 @@
#!/bin/sh
#
# 2011-2015 Steven Armstrong (steven-cdist at armstrong.cc)
# 2019 Dennis Camera (dennis.camera at ssrq-sds-fds.ch)
#
# This file is part of cdist.
#
@ -23,28 +22,13 @@
#
name=$__object_id
os=$("$__explorer/os")
os="$("$__explorer/os")"
not_supported() {
echo "Your operating system ($os) is currently not supported." >&2
echo "Cannot extract group information." >&2
echo "Please contribute an implementation for it if you can." >&2
exit 1
}
case $os in
"freebsd"|"netbsd")
echo "$os does not have getent gshadow" >&2
exit 0
;;
case "$os" in
"freebsd"|"netbsd")
echo "$os does not have getent gshadow"
exit 0
;;
esac
if command -v getent >/dev/null
then
getent gshadow "$name" || true
elif [ -f /etc/gshadow ]
then
grep "^${name}:" /etc/gshadow || true
else
not_supported
fi
getent gshadow "$name" || true

View file

@ -33,9 +33,6 @@ if [ -z "${certbot_fullpath}" ]; then
require="__apt_source/stretch-backports" __package_apt certbot \
--target-release stretch-backports
;;
10*)
__package_apt certbot
;;
*)
echo "Unsupported OS version: $os_version" >&2
exit 1
@ -65,12 +62,11 @@ if [ -z "${certbot_fullpath}" ]; then
--distribution ascii-backports \
--component main
require="__apt_source/ascii-backports" __package_apt python-certbot \
--target-release ascii-backports
require="__apt_source/ascii-backports" __package_apt certbot \
--target-release ascii-backports
;;
beowulf*)
__package_apt certbot
;;
*)
echo "Unsupported OS version: $os_version" >&2
exit 1

View file

@ -27,10 +27,6 @@ else
name="$__object_id"
fi
# Remove the @.. repo tag for finding out whether it is installed
# f.i. pass@testing => pass
name="$(echo "$name" | sed 's/@.*//')"
if [ "$(apk list -I "$name")" ]; then
echo present
else

View file

@ -1,7 +1,6 @@
#!/bin/sh -e
#
# 2012-2014 Steven Armstrong (steven-cdist at armstrong.cc)
# 2019 Nico Schottelius (nico-cdist at schottelius.org)
#
# This file is part of cdist.
#
@ -23,7 +22,7 @@
os=$(cat "$__global/explorer/os")
case "$os" in
alpine|ubuntu|debian|archlinux|suse|scientific|centos|devuan)
ubuntu|debian|archlinux|suse|scientific|centos|devuan)
__package postfix --state present
;;
*)

View file

@ -22,7 +22,7 @@
os=$("$__explorer/os")
case "$os" in
alpine|ubuntu|debian|archlinux|suse|scientific|centos|devuan)
ubuntu|debian|archlinux|suse|scientific|centos|devuan)
:
;;
*)

View file

@ -1,7 +1,6 @@
#!/bin/sh -e
#
# 2012-2014 Steven Armstrong (steven-cdist at armstrong.cc)
# 2019 Nico Schottelius (nico-cdist at schottelius.org)
#
# This file is part of cdist.
#
@ -22,7 +21,7 @@
os=$(cat "$__global/explorer/os")
case "$os" in
alpine|archlinux|centos|debian|devuan|suse|scientific|ubuntu)
ubuntu|debian|archlinux|suse|scientific|centos|devuan)
:
;;
*)

View file

@ -34,7 +34,7 @@ esac
name="$__object_id"
if test -n "$(su - "$postgres_user" -c "psql postgres -twAc \"SELECT 1 FROM pg_database WHERE datname='$name'\"")"
if test -n "$(su - "$postgres_user" -c "psql postgres -tAc \"SELECT 1 FROM pg_database WHERE datname='$name'\"")"
then
echo 'present'
else
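
The only functional difference in this hunk is the added -w flag, which makes psql fail instead of prompting for a password when run non-interactively; the same flag is added to the pg_roles check in the next file. A rough way to reproduce the check by hand, run as the postgres system user (the database name is a placeholder):

    # prints 1 if the database exists, nothing otherwise
    psql postgres -twAc "SELECT 1 FROM pg_database WHERE datname='exampledb'"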

View file

@ -34,7 +34,7 @@ esac
name="$__object_id"
if test -n "$(su - "$postgres_user" -c "psql postgres -twAc \"SELECT 1 FROM pg_roles WHERE rolname='$name'\"")"
if test -n "$(su - "$postgres_user" -c "psql postgres -tAc \"SELECT 1 FROM pg_roles WHERE rolname='$name'\"")"
then
echo 'present'
else

View file

@ -55,7 +55,7 @@ case "$state_should" in
[ -n "$password" ] && password="PASSWORD '$password'"
cmd="CREATE ROLE $name WITH $password $booleans"
echo "su - '$postgres_user' -c \"psql postgres -wc \\\"$cmd\\\"\""
echo "su - '$postgres_user' -c \"psql postgres -c \\\"$cmd\\\"\""
;;
absent)
echo "su - '$postgres_user' -c \"dropuser \\\"$name\\\"\""

View file

@ -1,7 +1,6 @@
#!/bin/sh
#
# 2014 Steven Armstrong (steven-cdist at armstrong.cc)
# 2019 Dennis Camera (dennis.camera at ssrq-sds-fds.ch)
#
# This file is part of cdist.
#
@ -20,42 +19,9 @@
#
if [ -f "$__object/parameter/file" ]; then
cat "$__object/parameter/file"
cat "$__object/parameter/file"
else
if [ -s "$__object/parameter/owner" ]
then
owner=$(cat "$__object/parameter/owner")
else
owner="$__object_id"
fi
if command -v getent >/dev/null
then
owner_line=$(getent passwd "$owner")
elif [ -f /etc/passwd ]
then
case $owner
in
[0-9][0-9]*)
owner_line=$(awk -F: "\$3 == \"${owner}\" { print }" /etc/passwd)
;;
*)
owner_line=$(awk -F: "\$1 == \"${owner}\" { print }" /etc/passwd)
;;
esac
fi
if [ "$owner_line" ]
then
home=$(echo "$owner_line" | cut -d':' -f6)
fi
if [ ! -d "$home" ]
then
# Don't know how to determine user's home directory, fall back to ~
home="~$owner"
command -v realpath >/dev/null && home=$(realpath "$home")
fi
[ -d "$home" ] && echo "$home/.ssh/authorized_keys"
owner="$(cat "$__object/parameter/owner" 2>/dev/null || echo "$__object_id")"
home=$(getent passwd "$owner" | cut -d':' -f 6)
echo "$home/.ssh/authorized_keys"
fi

View file

@ -1,7 +1,6 @@
#!/bin/sh
#
# 2014 Steven Armstrong (steven-cdist at armstrong.cc)
# 2019 Dennis Camera (dennis.camera at ssrq-sds-fds.ch)
#
# This file is part of cdist.
#
@ -19,28 +18,6 @@
# along with cdist. If not, see <http://www.gnu.org/licenses/>.
#
if [ -s "$__object/parameter/owner" ]
then
owner=$(cat "$__object/parameter/owner")
else
owner="$__object_id"
fi
if command -v getent >/dev/null
then
gid=$(getent passwd "$owner" | cut -d':' -f4)
getent group "$gid" || true
else
# Fallback to local file scanning
case $owner
in
[0-9][0-9]*)
gid=$(awk -F: "\$3 == \"${owner}\" { print \$4 }" /etc/passwd)
;;
*)
gid=$(awk -F: "\$1 == \"${owner}\" { print \$4 }" /etc/passwd)
;;
esac
awk -F: "\$3 == \"$gid\" { print }" /etc/group
fi
owner="$(cat "$__object/parameter/owner" 2>/dev/null || echo "$__object_id")"
gid="$(getent passwd "$owner" | cut -d':' -f 4)"
getent group "$gid" || true

View file

@ -23,12 +23,6 @@ owner="$(cat "$__object/parameter/owner" 2>/dev/null || echo "$__object_id")"
state="$(cat "$__object/parameter/state" 2>/dev/null)"
file="$(cat "$__object/explorer/file")"
if [ ! -f "$__object/parameter/nofile" ] && [ -z "$file" ]
then
echo "Cannot determine path of authorized_keys file" >&2
exit 1
fi
if [ ! -f "$__object/parameter/noparent" ] || [ ! -f "$__object/parameter/nofile" ]; then
group="$(cut -d':' -f 1 "$__object/explorer/group")"
if [ -z "$group" ]; then

View file

@ -1,7 +1,6 @@
#!/bin/sh
#
# 2014 Steven Armstrong (steven-cdist at armstrong.cc)
# 2019 Dennis Camera (dennis.camera at ssrq-sds-fds.ch)
#
# This file is part of cdist.
#
@ -19,11 +18,5 @@
# along with cdist. If not, see <http://www.gnu.org/licenses/>.
#
gid=$("$__type_explorer/passwd" | cut -d':' -f4)
if command -v getent >/dev/null
then
getent group "$gid" || true
else
awk -F: "\$3 == \"$gid\" { print }" /etc/group
fi
gid="$("$__type_explorer/passwd" | cut -d':' -f 4)"
getent group "$gid" || true

View file

@ -2,7 +2,6 @@
#
# 2012 Steven Armstrong (steven-cdist at armstrong.cc)
# 2014 Nico Schottelius (nico-cdist at schottelius.org)
# 2019 Dennis Camera (dennis.camera at ssrq-sds-fds.ch)
#
# This file is part of cdist.
#
@ -22,16 +21,4 @@
owner="$__object_id"
if command -v getent >/dev/null
then
getent passwd "$owner" || true
else
case $owner in
[0-9][0-9]*)
awk -F: "\$3 == \"$owner\" { print }" /etc/passwd
;;
*)
grep "^$owner:" /etc/passwd || true
;;
esac
fi
getent passwd "$owner" || true

View file

@ -2,7 +2,6 @@
#
# 2014 Steven Armstrong (steven-cdist at armstrong.cc)
# 2018 Takashi Yoshi (takashi at yoshi.email)
# 2019 Nico Schottelius (nico-cdist at schottelius.org)
#
# This file is part of cdist.
#
@ -25,7 +24,7 @@ os=$(cat "$__global/explorer/os")
case "$os" in
# Linux
alpine|redhat|centos|ubuntu|debian|devuan|archlinux|coreos)
redhat|centos|ubuntu|debian|devuan|archlinux|coreos)
:
;;
# BSD

View file

@ -23,9 +23,11 @@
if [ -f "$__object/parameter/gid" ]; then
gid=$(cat "$__object/parameter/gid")
if command -v getent >/dev/null; then
getent group "$gid" || true
getent=$(command -v getent)
if [ X != X"${getent}" ]; then
"${getent}" group "$gid" || true
elif [ -f /etc/group ]; then
grep -E "^(${gid}|([^:]+:){2}${gid}):" /etc/group || true
fi
fi

View file

@ -23,8 +23,9 @@
name=$__object_id
if command -v getent >/dev/null; then
getent passwd "$name" || true
getent=$(command -v getent)
if [ X != X"${getent}" ]; then
"${getent}" passwd "$name" || true
elif [ -f /etc/passwd ]; then
grep "^${name}:" /etc/passwd || true
fi

View file

@ -1,4 +1,4 @@
#!/bin/sh -e
#!/bin/sh
#
# 2011 Steven Armstrong (steven-cdist at armstrong.cc)
#
@ -22,19 +22,18 @@
#
name=$__object_id
os="$("$__explorer/os")"
# Default to using shadow passwords
database="shadow"
case $("$__explorer/os") in
'freebsd'|'netbsd'|'openbsd')
database='passwd'
;;
# Default to using shadow passwords
*)
database='shadow'
;;
case "$os" in
"freebsd"|"netbsd"|"openbsd") database="passwd";;
esac
if command -v getent >/dev/null; then
getent "$database" "$name" || true
getent=$(command -v getent)
if [ X != X"${getent}" ]; then
"${getent}" "$database" "$name" || true
elif [ -f /etc/shadow ]; then
grep "^${name}:" /etc/shadow || true
fi

View file

@ -1,32 +0,0 @@
#!/bin/sh -e
#
# 2019 Nico Schottelius (nico-cdist at schottelius.org)
#
# This file is part of cdist.
#
# cdist is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cdist is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with cdist. If not, see <http://www.gnu.org/licenses/>.
#
#
# Manage users.
os=$(cat "$__global/explorer/os")
case "$os" in
alpine)
__package shadow
;;
*)
:
;;
esac

View file

@ -46,10 +46,10 @@ EXAMPLES
# Ensure that internal SLES11 SP3 RIS is installed and all other services and repos are discarded
__zypper_service INTERNAL_SLES11_SP3 --service_desc "Internal SLES11 SP3 RIS" --uri "http://path/to/your/ris/dir" --remove-all-other-services --remove-all-repos
# Ensure that internal SLES11 SP3 RIS is in installed, no changes to other services or repos
# Ensure that internal SLES11 SP3 RIS is in installed, no changes to ohter services or repos
__zypper_service INTERNAL_SLES11_SP3 --service_desc "Internal SLES11 SP3 RIS" --uri "http://path/to/your/ris/dir"
# Drop service by uri, no changes to other services or repos
# Drop service by uri, no changes to ohter services or repos
__zypper_service INTERNAL_SLES11_SP3 --state absent --uri "http://path/to/your/ris/dir"

View file

@ -43,33 +43,6 @@ from cdist import core, inventory
from cdist.util.remoteutil import inspect_ssh_mux_opts
def graph_check_cycle(graph):
# Start from each node in the graph and check for cycle starting from it.
for node in graph:
# Cycle path.
path = [node]
has_cycle = _graph_dfs_cycle(graph, node, path)
if has_cycle:
return has_cycle, path
return False, None
def _graph_dfs_cycle(graph, node, path):
for neighbour in graph.get(node, ()):
# If node is already in path then this is cycle.
if neighbour in path:
path.append(neighbour)
return True
path.append(neighbour)
rv = _graph_dfs_cycle(graph, neighbour, path)
if rv:
return True
# Remove last item from list - neighbour whose DFS path we have
# just checked.
del path[-1]
return False
class Config(object):
"""Cdist main class to hold arbitrary data"""
@ -104,12 +77,9 @@ class Config(object):
self.remove_remote_files_dirs = remove_remote_files_dirs
self.explorer = core.Explorer(self.local.target_host, self.local,
self.remote, jobs=self.jobs,
dry_run=self.dry_run)
self.manifest = core.Manifest(self.local.target_host, self.local,
dry_run=self.dry_run)
self.code = core.Code(self.local.target_host, self.local, self.remote,
dry_run=self.dry_run)
self.remote, jobs=self.jobs)
self.manifest = core.Manifest(self.local.target_host, self.local)
self.code = core.Code(self.local.target_host, self.local, self.remote)
def _init_files_dirs(self):
"""Prepare files and directories for the run"""
@ -284,14 +254,14 @@ class Config(object):
cls.onehost(host, host_tags, host_base_path, hostdir,
args, parallel=False,
configuration=configuration)
except cdist.Error:
except cdist.Error as e:
failed_hosts.append(host)
if args.parallel and len(process_args) == 1:
log.debug("Only 1 host for parallel processing, doing it "
"sequentially")
try:
cls.onehost(*process_args[0])
except cdist.Error:
except cdist.Error as e:
failed_hosts.append(host)
elif args.parallel:
log.trace("Multiprocessing start method is {}".format(
@ -683,28 +653,6 @@ class Config(object):
self.__dict__.update(state)
self._open_logger()
def _validate_dependencies(self):
'''
Build dependency graph for unfinished objects and
check for cycles.
'''
graph = {}
for cdist_object in self.object_list():
obj_name = cdist_object.name
if obj_name not in graph:
graph[obj_name] = []
if cdist_object.state == cdist_object.STATE_DONE:
continue
for requirement in cdist_object.requirements_unfinished(
cdist_object.requirements):
graph[obj_name].append(requirement.name)
for requirement in cdist_object.requirements_unfinished(
cdist_object.autorequire):
graph[obj_name].append(requirement.name)
return graph_check_cycle(graph)
def iterate_until_finished(self):
"""
Go through all objects and solve them
@ -714,12 +662,6 @@ class Config(object):
objects_changed = True
while objects_changed:
# Check for cycles as early as possible.
has_cycle, path = self._validate_dependencies()
if has_cycle:
raise cdist.UnresolvableRequirementsError(
"Cycle detected in object dependencies:\n{}!".format(
" -> ".join(path)))
objects_changed = self.iterate_once()
# Check whether all objects have been finished
@ -758,29 +700,8 @@ class Config(object):
("The requirements of the following objects could not be "
"resolved:\n%s") % ("\n".join(info_string)))
def _handle_deprecation(self, cdist_object):
cdist_type = cdist_object.cdist_type
deprecated = cdist_type.deprecated
if deprecated is not None:
if deprecated:
self.log.warning("Type %s is deprecated: %s", cdist_type.name,
deprecated)
else:
self.log.warning("Type %s is deprecated.", cdist_type.name)
for param in cdist_object.parameters:
if param in cdist_type.deprecated_parameters:
msg = cdist_type.deprecated_parameters[param]
if msg:
format = "%s parameter of type %s is deprecated: %s"
args = [param, cdist_type.name, msg]
else:
format = "%s parameter of type %s is deprecated."
args = [param, cdist_type.name]
self.log.warning(format, *args)
def object_prepare(self, cdist_object, transfer_type_explorers=True):
"""Prepare object: Run type explorer + manifest"""
self._handle_deprecation(cdist_object)
self.log.verbose("Preparing object {}".format(cdist_object.name))
self.log.verbose(
"Running manifest and explorers for " + cdist_object.name)

View file

@ -69,7 +69,6 @@ class CdistType(object):
self.__optional_multiple_parameters = None
self.__boolean_parameters = None
self.__parameter_defaults = None
self.__deprecated_parameters = None
def __hash__(self):
return hash(self.name)
@ -134,17 +133,6 @@ class CdistType(object):
cannot run in parallel."""
return os.path.isfile(os.path.join(self.absolute_path, "nonparallel"))
@property
def deprecated(self):
"""Get type deprecation message. If message is None then type
is not deprecated."""
deprecated_path = os.path.join(self.absolute_path, "deprecated")
try:
with open(deprecated_path, 'r') as f:
return f.read()
except FileNotFoundError:
return None
@property
def explorers(self):
"""Return a list of available explorers"""
@ -276,23 +264,3 @@ class CdistType(object):
finally:
self.__parameter_defaults = defaults
return self.__parameter_defaults
@property
def deprecated_parameters(self):
if not self.__deprecated_parameters:
deprecated = {}
try:
deprecated_dir = os.path.join(self.absolute_path,
"parameter",
"deprecated")
for name in cdist.core.listdir(deprecated_dir):
try:
with open(os.path.join(deprecated_dir, name)) as fd:
deprecated[name] = fd.read().strip()
except EnvironmentError:
pass # Swallow errors raised by open() or read()
except EnvironmentError:
pass # Swallow error raised by os.listdir()
finally:
self.__deprecated_parameters = deprecated
return self.__deprecated_parameters
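
The removed deprecated/deprecated_parameters properties are purely file-based, so a type author marks deprecations from the shell; a minimal sketch (type and parameter names are made up for illustration):

    # Deprecate a whole type; the file's content becomes the warning message.
    echo "Use __new_example instead." > cdist/conf/type/__old_example/deprecated

    # Deprecate a single parameter of that type.
    mkdir -p cdist/conf/type/__old_example/parameter/deprecated
    echo "Use --new-param instead." > cdist/conf/type/__old_example/parameter/deprecated/old-param

With this hunk applied (4.11), those marker files are simply ignored.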

View file

@ -97,7 +97,7 @@ class Code(object):
"""
# target_host is tuple (target_host, target_hostname, target_fqdn)
def __init__(self, target_host, local, remote, dry_run=False):
def __init__(self, target_host, local, remote):
self.target_host = target_host
self.local = local
self.remote = remote
@ -113,9 +113,6 @@ class Code(object):
local.log),
}
if dry_run:
self.env['__cdist_dry_run'] = '1'
def _run_gencode(self, cdist_object, which):
cdist_type = cdist_object.cdist_type
script = os.path.join(self.local.type_path,
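
The dry_run plumbing removed here exported __cdist_dry_run=1 into the environment of explorers, manifests and gencode scripts. A type script that wants to stay side-effect free on dry runs could guard on it; a minimal, hypothetical gencode-remote fragment:

    # Skip emitting destructive remote code when cdist runs in dry-run mode.
    if [ "${__cdist_dry_run:-}" ]; then
        exit 0
    fi
    echo "rm -f /var/cache/example.lock"

With this hunk applied (4.11), the variable is no longer set, so such guards never trigger.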

View file

@ -67,7 +67,7 @@ class Explorer(object):
"""Executes cdist explorers.
"""
def __init__(self, target_host, local, remote, jobs=None, dry_run=False):
def __init__(self, target_host, local, remote, jobs=None):
self.target_host = target_host
self._open_logger()
@ -84,10 +84,6 @@ class Explorer(object):
'__cdist_log_level_name': util.log_level_name_env_var_val(
self.log),
}
if dry_run:
self.env['__cdist_dry_run'] = '1'
self._type_explorers_transferred = []
self.jobs = jobs

View file

@ -96,7 +96,7 @@ class Manifest(object):
"""Executes cdist manifests.
"""
def __init__(self, target_host, local, dry_run=False):
def __init__(self, target_host, local):
self.target_host = target_host
self.local = local
@ -117,9 +117,6 @@ class Manifest(object):
self.log),
}
if dry_run:
self.env['__cdist_dry_run'] = '1'
def _open_logger(self):
self.log = logging.getLogger(self.target_host[0])

View file

@ -319,9 +319,7 @@ class Emulator(object):
lastcreatedtype)
else:
if 'require' in self.env:
appendix = " " + lastcreatedtype
if appendix not in self.env['require']:
self.env['require'] += appendix
self.env['require'] += " " + lastcreatedtype
else:
self.env['require'] = lastcreatedtype
self.log.debug(("Injecting require for "

View file

@ -315,7 +315,7 @@ class InventoryHost(Inventory):
hostpath = self._host_path(host)
self.log.trace("hostpath: {}".format(hostpath))
if self.action == "add" and not os.path.exists(hostpath):
self._new_hostpath(hostpath)
self._new_hostpath(hostpath)
else:
if not os.path.isfile(hostpath):
raise cdist.Error(("Host path \'{}\' is"

View file

@ -1,101 +0,0 @@
import os
import os.path
import sys
import inspect
import argparse
import cdist
import logging
_PREOS_CALL = "commandline"
_PREOS_NAME = "_preos_name"
_PREOS_MARKER = "_cdist_preos"
_PLUGINS_DIR = "preos"
_PLUGINS_PATH = [os.path.join(os.path.dirname(__file__), _PLUGINS_DIR), ]
cdist_home = cdist.home_dir()
if cdist_home:
cdist_home_preos = os.path.join(cdist_home, "preos")
if os.path.isdir(cdist_home_preos):
_PLUGINS_PATH.append(cdist_home_preos)
sys.path.extend(_PLUGINS_PATH)
log = logging.getLogger("PreOS")
def preos_plugin(obj):
"""It is preos if _PREOS_MARKER is True and has _PREOS_CALL."""
if hasattr(obj, _PREOS_MARKER):
is_preos = getattr(obj, _PREOS_MARKER)
else:
is_preos = False
if is_preos and hasattr(obj, _PREOS_CALL):
yield obj
def scan_preos_dir_plugins(dir):
for fname in os.listdir(dir):
if os.path.isfile(os.path.join(dir, fname)):
fname = os.path.splitext(fname)[0]
module_name = fname
try:
module = __import__(module_name)
yield from preos_plugin(module)
clsmembers = inspect.getmembers(module, inspect.isclass)
for cm in clsmembers:
c = cm[1]
yield from preos_plugin(c)
except ImportError as e:
log.warning("Cannot import '{}': {}".format(module_name, e))
def find_preos_plugins():
for dir in _PLUGINS_PATH:
yield from scan_preos_dir_plugins(dir)
def find_preoses():
preoses = {}
for preos in find_preos_plugins():
if hasattr(preos, _PREOS_NAME):
preos_name = getattr(preos, _PREOS_NAME)
else:
preos_name = preos.__name__.lower()
preoses[preos_name] = preos
return preoses
def check_root():
if os.geteuid() != 0:
raise cdist.Error("Must be run with root privileges")
class PreOS(object):
preoses = None
@classmethod
def commandline(cls, argv):
if not cls.preoses:
cls.preoses = find_preoses()
parser = argparse.ArgumentParser(
description="Create PreOS", prog="cdist preos")
parser.add_argument('preos', help='PreOS to create, one of: {}'.format(
set(cls.preoses)))
args = parser.parse_args(argv[1:2])
preos_name = args.preos
if preos_name in cls.preoses:
preos = cls.preoses[preos_name]
func = getattr(preos, _PREOS_CALL)
if inspect.ismodule(preos):
func_args = [preos, argv[2:], ]
else:
func_args = [argv[2:], ]
log.info("Running preos : {}".format(preos_name))
func(*func_args)
else:
log.error("Unknown preos: {}, available preoses: {}".format(
preos_name, set(cls.preoses.keys())))

View file

@ -1 +0,0 @@
from debootstrap.debootstrap import Debian, Ubuntu, Devuan

View file

@ -1,239 +0,0 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# 2016 Darko Poljak (darko.poljak at ungleich.ch)
#
# This file is part of cdist.
#
# cdist is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cdist is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with cdist. If not, see <http://www.gnu.org/licenses/>.
#
#
import cdist
import cdist.config
import cdist.core
import cdist.preos
import argparse
import cdist.argparse
import logging
import os
import subprocess
class Debian(object):
_preos_name = 'debian'
_cdist_preos = True
_files_dir = os.path.join(os.path.dirname(__file__), "files")
@classmethod
def default_args(cls):
default_remote_exec = os.path.join(cls._files_dir, "remote-exec.sh")
default_remote_copy = os.path.join(cls._files_dir, "remote-copy.sh")
default_init_manifest = os.path.join(
cls._files_dir, "init-manifest-{}".format(cls._preos_name))
defargs = argparse.Namespace()
defargs.arch = 'amd64'
defargs.bootstrap = False
defargs.configure = False
defargs.cdist_params = '-v'
defargs.rm_bootstrap_dir = False
defargs.suite = 'stable'
defargs.remote_exec = default_remote_exec
defargs.remote_copy = default_remote_copy
defargs.manifest = default_init_manifest
return defargs
@classmethod
def get_parser(cls):
defargs = cls.default_args()
cdist_parser = cdist.argparse.get_parsers()
parser = argparse.ArgumentParser(
prog='cdist preos {}'.format(cls._preos_name),
parents=[cdist_parser['loglevel'], cdist_parser['beta']])
parser.add_argument('target_dir', nargs=1,
help=("target directory where PreOS will be "
"bootstrapped"))
parser.add_argument(
'-a', '--arch',
help="target debootstrap architecture, by default '{}'".format(
defargs.arch), dest='arch', default=defargs.arch)
parser.add_argument(
'-B', '--bootstrap',
help='do bootstrap step',
dest='bootstrap', action='store_true', default=defargs.bootstrap)
parser.add_argument(
'-C', '--configure',
help='do configure step',
dest='configure', action='store_true', default=defargs.configure)
parser.add_argument(
'-c', '--cdist-params',
help=("parameters that will be passed to cdist config, by default"
" '{}' is used".format(defargs.cdist_params)),
dest='cdist_params', default=defargs.cdist_params)
parser.add_argument(
'-D', '--drive-boot',
help='create bootable PreOS on specified drive',
dest='drive')
parser.add_argument(
'-e', '--remote-exec',
help=("remote exec that cdist config will use, by default "
"internal script is used"),
dest='remote_exec', default=defargs.remote_exec)
parser.add_argument(
'-i', '--init-manifest',
help=("init manifest that cdist config will use, by default "
"internal init manifest is used"),
dest='manifest', default=defargs.manifest)
parser.add_argument(
'-k', '--keyfile', action="append",
help=("ssh key files that will be added to cdist config; "
"'__ssh_authorized_keys root ...' type is appended to "
"initial manifest"),
dest='keyfile')
parser.add_argument(
'-m', '--mirror',
help='use specified mirror for debootstrap',
dest='mirror')
parser.add_argument(
'-P', '--root-password',
help='Set specified password for root, generated by default',
dest='root_password')
parser.add_argument('-p', '--pxe-boot-dir', help='PXE boot directory',
dest='pxe_boot_dir')
parser.add_argument(
'-r', '--rm-bootstrap-dir',
help='remove target directory after finishing',
dest='rm_bootstrap_dir', action='store_true',
default=defargs.rm_bootstrap_dir)
parser.add_argument(
'-S', '--script',
help='use specified script for debootstrap',
dest='script')
parser.add_argument('-s', '--suite',
help="suite used for debootstrap, "
"by default '{}'".format(defargs.suite),
dest='suite', default=defargs.suite)
parser.add_argument(
'-y', '--remote-copy',
help=("remote copy that cdist config will use, by default "
"internal script is used"),
dest='remote_copy', default=defargs.remote_copy)
parser.epilog = cdist.argparse.EPILOG
return parser
@classmethod
def update_env(cls, env):
pass
@classmethod
def commandline(cls, argv):
log = logging.getLogger(cls.__name__)
parser = cls.get_parser()
args = parser.parse_args(argv)
if args.script and not args.mirror:
raise cdist.Error("script option cannot be used without "
"mirror option")
args.command = cls._preos_name
cdist.argparse.check_beta(vars(args))
cdist.preos.check_root()
args.target_dir = os.path.realpath(args.target_dir[0])
args.os = cls._preos_name
args.remote_exec = os.path.realpath(args.remote_exec)
args.remote_copy = os.path.realpath(args.remote_copy)
args.manifest = os.path.realpath(args.manifest)
if args.keyfile:
new_keyfile = [os.path.realpath(x) for x in args.keyfile]
args.keyfile = new_keyfile
if args.pxe_boot_dir:
args.pxe_boot_dir = os.path.realpath(args.pxe_boot_dir)
cdist.argparse.handle_loglevel(args)
log.debug("preos: {}, args: {}".format(cls._preos_name, args))
try:
env = vars(args)
new_env = {}
for key in env:
if key == 'verbose' and env[key]:
if env[key] >= 3:
new_env['debug'] = "yes"
elif env[key] == 2:
new_env['verbose'] = "yes"
elif not env[key]:
new_env[key] = ''
elif isinstance(env[key], bool) and env[key]:
new_env[key] = "yes"
elif isinstance(env[key], list):
val = env[key]
new_env[key + "_cnt"] = str(len(val))
for i, v in enumerate(val):
new_env[key + "_" + str(i)] = v
else:
new_env[key] = str(env[key])
env = new_env
env.update(os.environ)
cls.update_env(env)
log.debug("preos: {} env: {}".format(cls._preos_name, env))
cmd = os.path.join(cls._files_dir, "code")
info_msg = ["Running preos: {}, suite: {}, arch: {}".format(
cls._preos_name, args.suite, args.arch), ]
if args.mirror:
info_msg.append("mirror: {}".format(args.mirror))
if args.script:
info_msg.append("script: {}".format(args.script))
if args.bootstrap:
info_msg.append("bootstrapping")
if args.configure:
info_msg.append("configuring")
if args.pxe_boot_dir:
info_msg.append("creating PXE")
if args.drive:
info_msg.append("creating bootable drive")
log.info(info_msg)
log.debug("cmd={}".format(cmd))
subprocess.check_call(cmd, env=env, shell=True)
except subprocess.CalledProcessError as e:
log.error("preos {} failed: {}".format(cls._preos_name, e))
class Ubuntu(Debian):
_preos_name = "ubuntu"
@classmethod
def default_args(cls):
defargs = super().default_args()
defargs.suite = 'xenial'
return defargs
class Devuan(Debian):
_preos_name = "devuan"
@classmethod
def default_args(cls):
defargs = super().default_args()
defargs.suite = 'jessie'
return defargs
@classmethod
def update_env(cls, env):
env['DEBOOTSTRAP_DIR'] = os.path.join(cls._files_dir,
'devuan-debootstrap')
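
Putting the option parser above together, a typical invocation (paths and key file are placeholders; must be run as root because of check_root, and depending on the cdist build the global beta flag may also be required) might look like:

    cdist preos devuan /tmp/preos \
        -B -C \
        -k ~/.ssh/id_rsa.pub \
        -p /tmp/preos-pxe

i.e. bootstrap (-B) and configure (-C) a Devuan PreOS under /tmp/preos, add the given ssh key for root and build a PXE boot directory.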

View file

@ -1,274 +0,0 @@
#!/bin/sh
##
## 2016 Darko Poljak (darko.poljak at ungleich.ch)
##
## This file is part of cdist.
##
## cdist is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## cdist is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with cdist. If not, see <http://www.gnu.org/licenses/>.
set -e
if [ "${debug}" ]
then
set -x
cdist_params="${cdist_params} -d"
fi
bootstrap_dir="${target_dir}"
case "${os}" in
ubuntu|debian|devuan)
# nothing, those are valid values
;;
*)
echo "ERROR: invalid os value: ${os}" >&2
exit 1
;;
esac
check_bootstrap_dir() {
if [ ! -e "$1" ]
then
echo "ERROR: bootstrap directory $1 does not exist" >&2
exit 1
fi
}
# bootstrap
if [ "${bootstrap}" ]
then
if [ "${DEBOOTSTRAP_DIR}" ]
then
debootstrap_cmd="${DEBOOTSTRAP_DIR}/debootstrap"
else
command -v debootstrap > /dev/null 2>&1 || {
echo "ERROR: debootstrap not found" >&2
exit 1
}
debootstrap_cmd="debootstrap"
fi
# If PreOS on drive then do not check for directory emptiness.
# Partition can at least contain 'lost+found' directory.
if [ ! "${drive}" ]
then
if [ -e "${bootstrap_dir}" ]
then
dir_content=$(ls -A "${bootstrap_dir}" | wc -l)
else
dir_content=0
fi
if [ "${dir_content}" -ne 0 ]
then
echo "ERROR: "${bootstrap_dir}" not empty " >&2
exit 1
fi
fi
if [ "${verbose}" -o "${debug}" ]
then
echo "bootstrapping..."
fi
mkdir -p "${bootstrap_dir}"
"${debootstrap_cmd}" --include=openssh-server --arch=${arch} ${suite} ${bootstrap_dir} \
${mirror} ${script}
if [ "${verbose}" -o "${debug}" ]
then
echo "bootstrap finished"
fi
fi
chroot_mount() {
mount -t proc none "${bootstrap_dir}/proc" || true
mount -t sysfs none "${bootstrap_dir}/sys" || true
mount -o bind /dev "${bootstrap_dir}/dev" || true
mount -t devpts none "${bootstrap_dir}/dev/pts" || true
}
chroot_umount() {
umount "${bootstrap_dir}/dev/pts" || true
umount "${bootstrap_dir}/dev" || true
umount "${bootstrap_dir}/sys" || true
umount "${bootstrap_dir}/proc" || true
}
TRAPFUNC="umount \"${bootstrap_dir}/dev/pts\" || true; \
umount \"${bootstrap_dir}/dev\" || true; \
umount \"${bootstrap_dir}/sys\" || true; \
umount \"${bootstrap_dir}/proc\" || true;"
# config
if [ "${configure}" ]
then
if [ ! -f "${manifest}" ]
then
echo "ERROR: ${manifest} does not exist" >&2
exit 1
fi
if [ ! -f "${remote_exec}" ]
then
echo "ERROR: ${remote_exec} does not exist" >&2
exit 1
fi
if [ ! -f "${remote_copy}" ]
then
echo "ERROR: ${remote_copy} does not exist" >&2
exit 1
fi
if [ "${keyfile_cnt}" -a "${keyfile_cnt}" -gt 0 ]
then
i="$((keyfile_cnt - 1))"
keyfiles=""
while [ "${i}" -ge 0 ]
do
kf_var="keyfile_${i}"
eval kf='$'"${kf_var}"
if [ ! -f "${kf}" ]
then
echo "ERROR: ${kf} does not exist" >&2
exit 1
fi
key=$(cat "${kf}")
keyfiles="${keyfiles} --key '${key}'"
i=$((i - 1))
done
ssh_auth_keys_line="__ssh_authorized_keys root ${keyfiles}\n"
else
ssh_auth_keys_line=""
fi
check_bootstrap_dir "${bootstrap_dir}"
if [ "${verbose}" -o "${debug}" ]
then
echo "configuring..."
fi
trap "${TRAPFUNC}" 0 1 2 3 15
chroot_mount
chroot "${bootstrap_dir}" /usr/bin/apt-get update
if [ "${drive}" ]
then
grub_manifest_line="__package grub-pc --state present\n"
grub_kern_params_line="__line linux_kernel_params \
--file /etc/default/grub \
--line 'GRUB_CMDLINE_LINUX_DEFAULT=\"quiet splash net.ifnames=0\"'\n"
else
grub_manifest_line=""
grub_kern_params_line=""
fi
grub_lines="${grub_manifest_line}${grub_kern_params_line}"
printf "${ssh_auth_keys_line}${grub_lines}" \
| cat "${manifest}" - |\
cdist config \
${cdist_params} -i - \
--remote-exec "${remote_exec}" \
--remote-copy "${remote_copy}" \
"${bootstrap_dir}"
# __hostname with systemd uses hostnamectl which needs dbus running
# set hostname explicitly here instead
printf "preos\n" > "${bootstrap_dir}/etc/hostname"
chroot "${bootstrap_dir}" /usr/bin/apt-get autoclean
chroot "${bootstrap_dir}" /usr/bin/apt-get clean
chroot "${bootstrap_dir}" /usr/bin/apt-get autoremove
chroot_umount
trap - 0 1 2 3 15
if [ "${verbose}" -o "${debug}" ]
then
echo "configuring finished"
fi
fi
if [ "${pxe_boot_dir}" ]
then
check_bootstrap_dir "${bootstrap_dir}"
if [ "${verbose}" -o "${debug}" ]
then
echo "creating pxe..."
fi
mkdir -p "${pxe_boot_dir}"
cp "${bootstrap_dir}"/boot/vmlinuz-* "${pxe_boot_dir}/kernel"
cd "${bootstrap_dir}"
find . -print0 | cpio --null -o --format=newc | gzip -9 > "${pxe_boot_dir}/initramfs"
mkdir -p "${pxe_boot_dir}/pxelinux.cfg"
cat <<EOPXEF > "${pxe_boot_dir}/pxelinux.cfg/default"
DEFAULT preos
LABEL preos
KERNEL kernel
APPEND utf8 load_ramdisk=1 root=/dev/ram nofb initrd=initramfs console=ttyS1,115200 net.ifnames=0
EOPXEF
cp "${bootstrap_dir}/usr/lib/PXELINUX/pxelinux.0" "${pxe_boot_dir}/pxelinux.0"
cp "${bootstrap_dir}/usr/lib/syslinux/modules/bios/ldlinux.c32" \
"${pxe_boot_dir}/ldlinux.c32"
# network boot needs all files world readable
chmod -R 644 "${pxe_boot_dir}"/*
if [ "${verbose}" -o "${debug}" ]
then
echo "pxe creation finished"
fi
fi
if [ "${drive}" ]
then
trap "${TRAPFUNC}" 0 1 2 3 15
chroot_mount
chroot "${bootstrap_dir}" grub-install ${drive}
chroot "${bootstrap_dir}" /bin/sh -c "GRUB_DISABLE_OS_PROBER=true update-grub"
# set root password
if [ ! "${root_password}" ]
then
if ! which strings >/dev/null 2>&1
then
printf "strings is missing\n" >&2
exit 1
fi
root_password="$(head -n 1000 /dev/urandom | strings | \
grep -o '[[:alnum:]]' | head -n 30 | tr -d '\n')"
printf "Generated root password (without quotes):'${root_password}'\n"
fi
chroot "${bootstrap_dir}" /bin/sh -c "echo \"root:${root_password}\" | \
chpasswd"
# /etc/securetty must not be world writeable.
chmod 644 "${bootstrap_dir}"/etc/securetty
chroot_umount
trap - 0 1 2 3 15
fi
if [ "${rm_bootstrap_dir}" ]
then
if [ "${verbose}" -o "${debug}" ]
then
echo "removing bootstrap dir..."
fi
rm -r -f "${bootstrap_dir}"
if [ "${verbose}" -o "${debug}" ]
then
echo "removing bootstrap dir finished"
fi
fi
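
This script is not meant to be called directly; the Python wrapper above serialises its argparse namespace into environment variables (yes/empty for booleans, keyfile_cnt/keyfile_N for lists) and then runs it. Roughly, a bootstrap-only run corresponds to something like the following, with all values illustrative:

    env os=debian suite=stable arch=amd64 \
        target_dir=/tmp/preos bootstrap=yes \
        cdist_params=-v verbose=yes \
        sh ./code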

View file

@ -1,18 +0,0 @@
# avoid dpkg-dev dependency; fish out the version with sed
VERSION := $(shell sed 's/.*(\(.*\)).*/\1/; q' debian/changelog)
all:
clean:
DSDIR=$(DESTDIR)/usr/share/debootstrap
install:
mkdir -p $(DSDIR)/scripts
mkdir -p $(DESTDIR)/usr/sbin
cp -a scripts/* $(DSDIR)/scripts/
install -o root -g root -m 0644 functions $(DSDIR)/
sed 's/@VERSION@/$(VERSION)/g' debootstrap >$(DESTDIR)/usr/sbin/debootstrap
chown root:root $(DESTDIR)/usr/sbin/debootstrap
chmod 0755 $(DESTDIR)/usr/sbin/debootstrap

View file

@ -1,65 +0,0 @@
README for debootstrap
======================
See the manpage for (some) documentation.
Running debootstrap from source
-------------------------------
You can run debootstrap from its source tree without installing it. This
can be useful if you want a quick way to make a Debian chroot on another
system, or if you are testing modifications to debootstrap.
First, get the source.
* Either by using git
git clone https://anonscm.debian.org/git/d-i/debootstrap.git
* Or by visiting <https://packages.debian.org/source/sid/debootstrap>
and downloading the tar.gz file
Then in the debootstrap source directory:
export DEBOOTSTRAP_DIR=`pwd`
sudo ./debootstrap stable my-stable-dir
If you are running a multi-stage bootstrap (for example for a QEMU
rootfs) you don't even need root:
export DEBOOTSTRAP_DIR=`pwd`
fakeroot ./debootstrap --foreign --arch=armhf testing my-testing-dir http://deb.debian.org/debian
Of course you will need to execute the second stage as root to finish the bootstrap:
(on foreign hardware)
/debootstrap/debootstrap --second-stage
Future
------
* Cross-strap support - so you can bootstrap a filesystem to the
point where it will successfully boot, and finish installing itself
without having to be running the target architecture or OS yourself.
debootstrap --arch powerpc sarge ./sarge-ppc-chroot ...
on an i386 system, boot a powerpc box with sarge-ppc-chroot as its
root files system, and have it "work". The cross-hurd package does
something similar, and should be replaced by this feature.
* There should be some (better) way of telling debootstrap what "base"
packages you want to install -- this varies between making a chroot,
doing an install, and doing a buildd. Also, some installs want
different base packages (to setup networking, or kernels, eg)
NMUing
------
If there's a problem with debootstrap that you need fixed, feel free to do
an NMU to fix it. Usual rules: try not to break anything, and mail the
patch to the BTS. Don't worry about asking first though.
However, note that debootstrap is now team maintained. Anyone in d-i can do
a release without the bother of a NMU.

View file

@ -1,11 +0,0 @@
Features:
++ second stage via chroot debootstrap/debootstrap
++ debootstrap/deb file to record deb destinations/information
-- configuration file
-- versus command line
-- support for sources (vs mirrors)
-- faux-pinning for packages
++ makedev in second stage

View file

@ -1,6 +0,0 @@
debootstrap
debootstrap-udeb
files
*.debhelper.log
*.substvars

View file

@ -1,15 +0,0 @@
To sync up with Debian's source for inspiration you should run the following:
`git remote add alioth-git git://anonscm.debian.org/d-i/debootstrap.git`
`git fetch alioth-git`
After that you can either cherry-pick or merge releases from Debian. To
merge a release, do:
`git tag` to list the release tags
and
`git merge <tag>`
followed by all the fixups and then commit with an appropriate message like
"Merging Release <tag> from debian"
Copyright 2016 Daniel Reurich <daniel@centurion.net.nz>

View file

@ -1,26 +0,0 @@
Source: debootstrap
Section: admin
Priority: extra
Maintainer: Franco (nextime) Lanza <nextime@devuan.org>
Uploaders: Franco (nextime) Lanza <nextime@devuan.org>, Daniel Reurich <daniel@centurion.net.nz>
Build-Depends: debhelper (>= 9), makedev (>= 2.3.1-69) [linux-any], git
Standards-Version: 3.9.8
Vcs-Browser: https://git.devuan.org/devuan-packages/debootstrap
Vcs-Git: https://git.devuan.org/devuan-packages/debootstrap.git
Package: debootstrap
Architecture: all
Depends: ${misc:Depends}, wget
Recommends: gnupg, ${keyring}, devuan-keyring
Description: Bootstrap a basic Devuan system
debootstrap is used to create a Devuan base system from scratch,
without requiring the availability of dpkg or apt. It does this by
downloading .deb files from a mirror site, and carefully unpacking them
into a directory which can eventually be chrooted into.
Package: debootstrap-udeb
Section: debian-installer
Package-Type: udeb
Architecture: all
Depends: ${misc:Depends}, mounted-partitions
Description: Bootstrap the Devuan system

View file

@ -1,30 +0,0 @@
This package was debianized by Anthony Towns <ajt@debian.org> on
Tue, 30 Jan 2001 10:54:45 +1000.
It was written from scratch for Debian by Anthony Towns <ajt@debian.org>
based loosely on the code for constructing base tarballs as part of the
boot-floppies package.
Copyright:
Copyright (c) 2001-2005 Anthony Towns
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View file

@ -1,9 +0,0 @@
[DEFAULT]
compression = xz
pristine-tar = false
upstream-tag = devuan/1.0.85
[git-buildpackage]
upstream-tree = tag
tarball-dir = ../tarballs/
export-dir = ../build-area/

View file

@ -1,47 +0,0 @@
#! /usr/bin/make -f
ifeq (0,$(shell dpkg-vendor --derives-from Ubuntu; echo $$?))
KEYRING := ubuntu-keyring
else ifeq (0,$(shell dpkg-vendor --derives-from Devuan; echo $$?))
KEYRING := devuan-keyring
else ifeq (0,$(shell dpkg-vendor --derives-from Tanglu; echo $$?))
KEYRING := tanglu-archive-keyring
else
KEYRING := debian-archive-keyring
endif
%:
dh $@
# need to be root to make devices, so build is done in install target
override_dh_auto_build:
override_dh_auto_install:
dh_auto_build
$(MAKE) install DESTDIR=$(CURDIR)/debian/debootstrap
$(MAKE) install DESTDIR=$(CURDIR)/debian/debootstrap-udeb
# remove scripts not needed by d-i
-rm -f debian/debootstrap-udeb/usr/share/debootstrap/scripts/potato \
debian/debootstrap-udeb/usr/share/debootstrap/scripts/woody \
debian/debootstrap-udeb/usr/share/debootstrap/scripts/sarge \
debian/debootstrap-udeb/usr/share/debootstrap/scripts/warty \
debian/debootstrap-udeb/usr/share/debootstrap/scripts/hoary \
debian/debootstrap-udeb/usr/share/debootstrap/scripts/breezy \
debian/debootstrap-udeb/usr/share/debootstrap/scripts/dapper \
debian/debootstrap-udeb/usr/share/debootstrap/scripts/edgy \
debian/debootstrap-udeb/usr/share/debootstrap/scripts/feisty \
debian/debootstrap-udeb/usr/share/debootstrap/scripts/*.buildd \
debian/debootstrap-udeb/usr/share/debootstrap/scripts/*.fakechroot \
debian/debootstrap-udeb/usr/share/debootstrap/scripts/stable \
debian/debootstrap-udeb/usr/share/debootstrap/scripts/testing \
debian/debootstrap-udeb/usr/share/debootstrap/scripts/unstable
override_dh_gencontrol:
dh_gencontrol -- -Vkeyring=$(KEYRING)
# Specify gzip to mitigate #770217:
override_dh_builddeb:
dh_builddeb -pdebootstrap -- -Zgzip
dh_builddeb -pdebootstrap-udeb -- -Zxz

View file

@ -1,703 +0,0 @@
#!/bin/sh
set -e
VERSION='@VERSION@'
unset TMP TEMP TMPDIR || true
# might not be exported if we're running from init=/bin/sh or similar
export PATH
###########################################################################
if [ -z "$DEBOOTSTRAP_DIR" ]; then
if [ -x /debootstrap/debootstrap ]; then
DEBOOTSTRAP_DIR=/debootstrap
else
DEBOOTSTRAP_DIR=/usr/share/debootstrap
fi
fi
. $DEBOOTSTRAP_DIR/functions
exec 4>&1
LANG=C
USE_COMPONENTS=main
KEYRING=""
DISABLE_KEYRING=""
FORCE_KEYRING=""
VARIANT=""
MERGED_USR="no"
ARCH=""
HOST_ARCH=""
HOST_OS=""
KEEP_DEBOOTSTRAP_DIR=""
USE_DEBIANINSTALLER_INTERACTION=""
SECOND_STAGE_ONLY=""
PRINT_DEBS=""
CHROOTDIR=""
MAKE_TARBALL=""
EXTRACTOR_OVERRIDE=""
UNPACK_TARBALL=""
ADDITIONAL=""
EXCLUDE=""
VERBOSE=""
CERTIFICATE=""
CHECKCERTIF=""
PRIVATEKEY=""
DEF_MIRROR="http://packages.devuan.org/merged"
DEF_HTTPS_MIRROR="https://packages.devuan.org/merged"
export LANG USE_COMPONENTS EXCLUDE
umask 022
###########################################################################
## phases:
## finddebs dldebs printdebs first_stage second_stage
RESOLVE_DEPS=true
WHAT_TO_DO="finddebs dldebs first_stage second_stage"
am_doing_phase () {
# usage: if am_doing_phase finddebs; then ...; fi
local x;
for x in "$@"; do
if echo " $WHAT_TO_DO " | grep -q " $x "; then return 0; fi
done
return 1
}
###########################################################################
usage_err()
{
info USAGE1 "usage: [OPTION]... <suite> <target> [<mirror> [<script>]]"
info USAGE2 "Try \`${0##*/} --help' for more information."
error "$@"
}
usage()
{
echo "Usage: ${0##*/} [OPTION]... <suite> <target> [<mirror> [<script>]]"
echo "Bootstrap a Debian base system into a target directory."
echo
cat <<EOF
--help display this help and exit
--version display version information and exit
--verbose don't turn off the output of wget
--download-only download packages, but don't perform installation
--print-debs print the packages to be installed, and exit
--arch=A set the architecture to install (use if no dpkg)
[ --arch=powerpc ]
--include=A,B,C adds specified names to the list of base packages
--exclude=A,B,C removes specified packages from the list
--components=A,B,C use packages from the listed components of the
archive
--variant=X use variant X of the bootstrap scripts
(currently supported variants: buildd, fakechroot,
minbase)
--merged-usr make /{bin,sbin,lib}/ symlinks to /usr/
--keyring=K check Release files against keyring K
--no-check-gpg avoid checking Release file signatures
--force-check-gpg force checking Release file signatures
(also disables automatic fallback to HTTPS in case
of a missing keyring), aborting otherwise
--no-resolve-deps don't try to resolve dependencies automatically
--unpack-tarball=T acquire .debs from a tarball instead of http
--make-tarball=T download .debs and create a tarball (tgz format)
--second-stage-target=DIR
Run second stage in a subdirectory instead of root
(can be used to create a foreign chroot)
(requires --second-stage)
--extractor=TYPE override automatic .deb extractor selection
(supported: $EXTRACTORS_SUPPORTED)
--debian-installer used for internal purposes by debian-installer
--private-key=file read the private key from file
--certificate=file use the client certificate stored in file (PEM)
--no-check-certificate do not check certificate against certificate authorities
EOF
}
###########################################################################
if [ -z "$PKGDETAILS" ]; then
error 1 NO_PKGDETAILS "No pkgdetails available; either install perl, or build pkgdetails.c from the base-installer source package"
fi
###########################################################################
if [ $# != 0 ] ; then
while true ; do
case "$1" in
--help)
usage
exit 0
;;
--version)
echo "debootstrap $VERSION"
exit 0
;;
--debian-installer)
if ! (echo -n "" >&3) 2>/dev/null; then
error 1 ARG_DIBYHAND "If running debootstrap by hand, don't use --debian-installer"
fi
USE_DEBIANINSTALLER_INTERACTION=yes
shift
;;
--foreign)
if [ "$PRINT_DEBS" != "true" ]; then
WHAT_TO_DO="finddebs dldebs first_stage"
fi
shift
;;
--second-stage)
WHAT_TO_DO="second_stage"
SECOND_STAGE_ONLY=true
shift
;;
--second-stage-target|--second-stage-target=?*)
if [ "$SECOND_STAGE_ONLY" != "true" ] ; then
error 1 STAGE2ONLY "option %s only applies in the second stage" "$1"
fi
if [ "$1" = "--second-stage-target" -a -n "$2" ] ; then
CHROOTDIR="$2"
shift 2
elif [ "$1" != "${1#--second-stage-target=}" ]; then
CHROOTDIR="${1#--second-stage-target=}"
shift
else
error 1 NEEDARG "option requires an argument: %s" "$1"
fi
;;
--print-debs)
WHAT_TO_DO="finddebs printdebs kill_target"
PRINT_DEBS=true
shift
;;
--download-only)
WHAT_TO_DO="finddebs dldebs"
shift
;;
--make-tarball|--make-tarball=?*)
WHAT_TO_DO="finddebs dldebs maketarball kill_target"
if [ "$1" = "--make-tarball" -a -n "$2" ] ; then
MAKE_TARBALL="$2"
shift 2
elif [ "$1" != "${1#--make-tarball=}" ]; then
MAKE_TARBALL="${1#--make-tarball=}"
shift
else
error 1 NEEDARG "option requires an argument %s" "$1"
fi
;;
--resolve-deps)
# redundant, but avoids breaking compatibility
RESOLVE_DEPS=true
shift
;;
--no-resolve-deps)
RESOLVE_DEPS=false
shift
;;
--keep-debootstrap-dir)
KEEP_DEBOOTSTRAP_DIR=true
shift
;;
--arch|--arch=?*)
if [ "$1" = "--arch" -a -n "$2" ] ; then
ARCH="$2"
shift 2
elif [ "$1" != "${1#--arch=}" ]; then
ARCH="${1#--arch=}"
shift
else
error 1 NEEDARG "option requires an argument %s" "$1"
fi
;;
--extractor|--extractor=?*)
if [ "$1" = "--extractor" -a -n "$2" ] ; then
EXTRACTOR_OVERRIDE="$2"
shift 2
elif [ "$1" != "${1#--extractor=}" ]; then
EXTRACTOR_OVERRIDE="${1#--extractor=}"
shift
else
error 1 NEEDARG "option requires an argument %s" "$1"
fi
if valid_extractor "$EXTRACTOR_OVERRIDE"; then
if ! type "$EXTRACTOR_OVERRIDE" >/dev/null 2>&1; then
error 1 MISSINGEXTRACTOR "The selected extractor cannot be found: %s" "$EXTRACTOR_OVERRIDE"
fi
else
error 1 BADEXTRACTOR "%s: unknown extractor" "$EXTRACTOR_OVERRIDE"
fi
;;
--unpack-tarball|--unpack-tarball=?*)
if [ "$1" = "--unpack-tarball" -a -n "$2" ] ; then
UNPACK_TARBALL="$2"
shift 2
elif [ "$1" != "${1#--unpack-tarball=}" ]; then
UNPACK_TARBALL="${1#--unpack-tarball=}"
shift
else
error 1 NEEDARG "option requires an argument %s" "$1"
fi
if [ ! -f "$UNPACK_TARBALL" ] ; then
error 1 NOTARBALL "%s: No such file or directory" "$UNPACK_TARBALL"
fi
;;
--include|--include=?*)
if [ "$1" = "--include" -a -n "$2" ]; then
ADDITIONAL="$2"
shift 2
elif [ "$1" != "${1#--include=}" ]; then
ADDITIONAL="${1#--include=}"
shift 1
else
error 1 NEEDARG "option requires an argument %s" "$1"
fi
ADDITIONAL="$(echo "$ADDITIONAL" | tr , " ")"
;;
--exclude|--exclude=?*)
if [ "$1" = "--exclude" -a -n "$2" ]; then
EXCLUDE="$2"
shift 2
elif [ "$1" != "${1#--exclude=}" ]; then
EXCLUDE="${1#--exclude=}"
shift 1
else
error 1 NEEDARG "option requires an argument %s" "$1"
fi
EXCLUDE="$(echo "$EXCLUDE" | tr , " ")"
;;
--verbose)
VERBOSE=true
export VERBOSE
shift 1
;;
--components|--components=?*)
if [ "$1" = "--components" -a -n "$2" ]; then
USE_COMPONENTS="$2"
shift 2
elif [ "$1" != "${1#--components=}" ]; then
USE_COMPONENTS="${1#--components=}"
shift 1
else
error 1 NEEDARG "option requires an argument %s" "$1"
fi
USE_COMPONENTS="$(echo "$USE_COMPONENTS" | tr , "|")"
;;
--variant|--variant=?*)
if [ "$1" = "--variant" -a -n "$2" ]; then
VARIANT="$2"
shift 2
elif [ "$1" != "${1#--variant=}" ]; then
VARIANT="${1#--variant=}"
shift 1
else
error 1 NEEDARG "option requires an argument %s" "$1"
fi
;;
--merged-usr)
MERGED_USR=yes
shift
;;
--no-merged-usr)
MERGED_USR=no
shift
;;
--keyring|--keyring=?*)
if ! gpgv --version >/dev/null 2>&1; then
error 1 NEEDGPGV "gpgv not installed, but required for Release verification"
fi
if [ "$1" = "--keyring" -a -n "$2" ]; then
KEYRING="$2"
shift 2
elif [ "$1" != "${1#--keyring=}" ]; then
KEYRING="${1#--keyring=}"
shift 1
else
error 1 NEEDARG "option requires an argument %s" "$1"
fi
;;
--no-check-gpg)
shift 1
DISABLE_KEYRING=1
;;
--force-check-gpg)
shift 1
FORCE_KEYRING=1
;;
--certificate|--certificate=?*)
if [ "$1" = "--certificate" -a -n "$2" ]; then
CERTIFICATE="--certificate=$2"
shift 2
elif [ "$1" != "${1#--certificate=}" ]; then
CERTIFICATE="--certificate=${1#--certificate=}"
shift 1
else
error 1 NEEDARG "option requires an argument %s" "$1"
fi
;;
--private-key|--private-key=?*)
if [ "$1" = "--private-key" -a -n "$2" ]; then
PRIVATEKEY="--private-key=$2"
shift 2
elif [ "$1" != "${1#--private-key=}" ]; then
PRIVATEKEY="--private-key=${1#--private-key=}"
shift 1
else
error 1 NEEDARG "option requires an argument %s" "$1"
fi
;;
--no-check-certificate)
CHECKCERTIF="--no-check-certificate"
shift
;;
-*)
error 1 BADARG "unrecognized or invalid option %s" "$1"
;;
*)
break
;;
esac
done
fi
###########################################################################
if [ -n "$DISABLE_KEYRING" -a -n "$FORCE_KEYRING" ]; then
error 1 BADARG "Both --no-check-gpg and --force-check-gpg specified, please pick one (at most)"
fi
###########################################################################
if [ "$SECOND_STAGE_ONLY" = "true" ]; then
SUITE=$(cat $DEBOOTSTRAP_DIR/suite)
ARCH=$(cat $DEBOOTSTRAP_DIR/arch)
if [ -e $DEBOOTSTRAP_DIR/variant ]; then
VARIANT=$(cat $DEBOOTSTRAP_DIR/variant)
SUPPORTED_VARIANTS="$VARIANT"
fi
if [ -z "$CHROOTDIR" ]; then
TARGET=/
else
TARGET=$CHROOTDIR
fi
SCRIPT=$DEBOOTSTRAP_DIR/suite-script
else
if [ -z "$1" ] || [ -z "$2" ]; then
usage_err 1 NEEDSUITETARGET "You must specify a suite and a target."
fi
SUITE="$1"
TARGET="$2"
USER_MIRROR="$3"
TARGET="${TARGET%/}"
if [ "${TARGET#/}" = "${TARGET}" ]; then
if [ "${TARGET%/*}" = "$TARGET" ] ; then
TARGET="$(echo "`pwd`/$TARGET")"
else
TARGET="$(cd "${TARGET%/*}"; echo "`pwd`/${TARGET##*/}")"
fi
fi
SCRIPT="$DEBOOTSTRAP_DIR/scripts/$1"
if [ -n "$VARIANT" ] && [ -e "${SCRIPT}.${VARIANT}" ]; then
SCRIPT="${SCRIPT}.${VARIANT}"
SUPPORTED_VARIANTS="$VARIANT"
fi
if [ "$4" != "" ]; then
SCRIPT="$4"
fi
fi
###########################################################################
if in_path dpkg && \
dpkg --print-architecture >/dev/null 2>&1; then
HOST_ARCH=`/usr/bin/dpkg --print-architecture`
elif in_path udpkg && \
udpkg --print-architecture >/dev/null 2>&1; then
HOST_ARCH=`/usr/bin/udpkg --print-architecture`
elif [ -e $DEBOOTSTRAP_DIR/arch ]; then
HOST_ARCH=`cat $DEBOOTSTRAP_DIR/arch`
fi
HOST_OS="$HOST_ARCH"
# basic host OS guessing for non-Debian systems
if [ -z "$HOST_OS" ]; then
case `uname` in
Linux)
HOST_OS=linux
;;
GNU/kFreeBSD)
HOST_OS=kfreebsd
;;
GNU)
HOST_OS=hurd
;;
FreeBSD*)
HOST_OS=freebsd
;;
esac
fi
if [ -z "$ARCH" ]; then
ARCH=$HOST_ARCH
fi
if [ -z "$ARCH" ] || [ -z "$HOST_OS" ]; then
error 1 WHATARCH "Couldn't work out current architecture"
fi
if [ "$HOST_OS" = "kfreebsd" ] || [ "$HOST_OS" = "freebsd" ]; then
for module in linprocfs fdescfs tmpfs linsysfs; do
kldstat -m "$module" > /dev/null 2>&1 || warning SANITYCHECK "Probably required module %s is not loaded" "$module"
done
fi
if [ "$TARGET" = "/" ]; then
CHROOT_CMD=""
else
CHROOT_CMD="chroot $TARGET"
fi
if [ -z "$SHA_SIZE" ]; then
SHA_SIZE=256
fi
if ! in_path "sha${SHA_SIZE}sum" && ! in_path "sha${SHA_SIZE}"; then
SHA_SIZE=1
fi
DEBOOTSTRAP_CHECKSUM_FIELD="SHA$SHA_SIZE"
export ARCH SUITE TARGET CHROOT_CMD SHA_SIZE DEBOOTSTRAP_CHECKSUM_FIELD
if am_doing_phase first_stage second_stage; then
if in_path id && [ `id -u` -ne 0 ]; then
error 1 NEEDROOT "debootstrap can only run as root"
fi
# Ensure that we can create working devices and executables on the target.
if ! check_sane_mount "$TARGET"; then
error 1 NOEXEC "Cannot install into target '$TARGET' mounted with noexec or nodev"
fi
fi
if [ ! -e "$SCRIPT" ]; then
error 1 NOSCRIPT "No such script: %s" "$SCRIPT"
fi
###########################################################################
if [ "$TARGET" != "" ]; then
mkdir -p "$TARGET/debootstrap"
fi
###########################################################################
# Use of fd's by functions/scripts:
#
# stdin/stdout/stderr: used normally
# fd 4: I:/W:/etc information
# fd 5,6: spare for functions
# fd 7,8: spare for scripts
if [ "$USE_DEBIANINSTALLER_INTERACTION" = yes ]; then
# stdout=stderr: full log of debootstrap run
# fd 3: I:/W:/etc information
exec 4>&3
elif am_doing_phase printdebs; then
# stderr: I:/W:/etc information
# stdout: debs needed
exec 4>&2
else
# stderr: used in exceptional circumstances only
# stdout: I:/W:/etc information
# $TARGET/debootstrap/debootstrap.log: full log of debootstrap run
exec 4>&1
exec >>"$TARGET/debootstrap/debootstrap.log"
exec 2>&1
fi
###########################################################################
if [ "$UNPACK_TARBALL" ]; then
if [ "${UNPACK_TARBALL#/}" = "$UNPACK_TARBALL" ]; then
error 1 TARPATH "Tarball must be given a complete path"
fi
if [ "${UNPACK_TARBALL%.tar}" != "$UNPACK_TARBALL" ]; then
(cd "$TARGET" && tar -xf "$UNPACK_TARBALL")
elif [ "${UNPACK_TARBALL%.tgz}" != "$UNPACK_TARBALL" ]; then
(cd "$TARGET" && zcat "$UNPACK_TARBALL" | tar -xf -)
else
error 1 NOTTAR "Unknown tarball: must be either .tar or .tgz"
fi
fi
###########################################################################
. "$SCRIPT"
if [ "$SECOND_STAGE_ONLY" = "true" ]; then
MIRRORS=null:
else
MIRRORS="$DEF_MIRROR"
if [ "$USER_MIRROR" != "" ]; then
MIRRORS="$USER_MIRROR"
MIRRORS="${MIRRORS%/}"
fi
fi
export MIRRORS
ok=false
for v in $SUPPORTED_VARIANTS; do
if doing_variant $v; then ok=true; fi
done
if ! $ok; then
error 1 UNSUPPVARIANT "unsupported variant"
fi
###########################################################################
if am_doing_phase finddebs; then
if [ "$FINDDEBS_NEEDS_INDICES" = "true" ] || \
[ "$RESOLVE_DEPS" = "true" ]; then
download_indices
GOT_INDICES=true
fi
work_out_debs
base=$(without "$base $ADDITIONAL" "$EXCLUDE")
if [ "$RESOLVE_DEPS" = true ]; then
requiredX=$(echo $(echo $required | tr ' ' '\n' | sort | uniq))
baseX=$(echo $(echo $base | tr ' ' '\n' | sort | uniq))
baseN=$(without "$baseX" "$requiredX")
baseU=$(without "$baseX" "$baseN")
if [ "$baseU" != "" ]; then
info REDUNDANTBASE "Found packages in base already in required: %s" "$baseU"
fi
info RESOLVEREQ "Resolving dependencies of required packages..."
required=$(resolve_deps $requiredX)
info RESOLVEBASE "Resolving dependencies of base packages..."
base=$(resolve_deps $baseX)
base=$(without "$base" "$required")
requiredX=$(without "$required" "$requiredX")
baseX=$(without "$base" "$baseX")
if [ "$requiredX" != "" ]; then
info NEWREQUIRED "Found additional required dependencies: %s" "$requiredX"
fi
if [ "$baseX" != "" ]; then
info NEWBASE "Found additional base dependencies: %s" "$baseX"
fi
fi
all_debs="$required $base"
fi
if am_doing_phase printdebs; then
echo "$all_debs"
fi
if am_doing_phase dldebs; then
if [ "$GOT_INDICES" != "true" ]; then
download_indices
fi
download $all_debs
fi
if am_doing_phase maketarball; then
(cd $TARGET;
tar czf - var/lib/apt var/cache/apt) >$MAKE_TARBALL
fi
if am_doing_phase first_stage; then
choose_extractor
# first stage sets up the chroot -- no calls should be made to
# "chroot $TARGET" here; but they should be possible by the time it's
# finished
first_stage_install
if ! am_doing_phase second_stage; then
cp "$0" "$TARGET/debootstrap/debootstrap"
cp $DEBOOTSTRAP_DIR/functions "$TARGET/debootstrap/functions"
cp $SCRIPT "$TARGET/debootstrap/suite-script"
echo "$ARCH" >"$TARGET/debootstrap/arch"
echo "$SUITE" >"$TARGET/debootstrap/suite"
[ "" = "$VARIANT" ] ||
echo "$VARIANT" >"$TARGET/debootstrap/variant"
echo "$required" >"$TARGET/debootstrap/required"
echo "$base" >"$TARGET/debootstrap/base"
chmod 755 "$TARGET/debootstrap/debootstrap"
fi
fi
if am_doing_phase second_stage; then
if [ "$SECOND_STAGE_ONLY" = true ]; then
required="$(cat $DEBOOTSTRAP_DIR/required)"
base="$(cat $DEBOOTSTRAP_DIR/base)"
all_debs="$required $base"
fi
# second stage uses the chroot to clean itself up -- has to be able to
# work from entirely within the chroot (in case we've booted into it,
# possibly over NFS eg)
second_stage_install
# create sources.list
# first, kill debootstrap.invalid sources.list
if [ -e "$TARGET/etc/apt/sources.list" ]; then
rm -f "$TARGET/etc/apt/sources.list"
fi
if [ "${MIRRORS#http://}" != "$MIRRORS" ]; then
setup_apt_sources "${MIRRORS%% *}"
mv_invalid_to "${MIRRORS%% *}"
else
setup_apt_sources "$DEF_MIRROR"
mv_invalid_to "$DEF_MIRROR"
fi
if [ -e "$TARGET/debootstrap/debootstrap.log" ]; then
if [ "$KEEP_DEBOOTSTRAP_DIR" = true ]; then
cp "$TARGET/debootstrap/debootstrap.log" "$TARGET/var/log/bootstrap.log"
else
# debootstrap.log is still open as stdout/stderr and needs
# to remain so, but after unlinking it some NFS servers
# implement this by a temporary file in the same directory,
# which makes it impossible to rmdir that directory.
# Moving it instead works around the problem.
mv "$TARGET/debootstrap/debootstrap.log" "$TARGET/var/log/bootstrap.log"
fi
fi
sync
if [ "$KEEP_DEBOOTSTRAP_DIR" = true ]; then
if [ -x "$TARGET/debootstrap/debootstrap" ]; then
chmod 644 "$TARGET/debootstrap/debootstrap"
fi
else
rm -rf "$TARGET/debootstrap"
fi
fi
if am_doing_phase kill_target; then
if [ "$KEEP_DEBOOTSTRAP_DIR" != true ]; then
info KILLTARGET "Deleting target directory"
rm -rf "$TARGET"
fi
fi


@ -1,189 +0,0 @@
.TH DEBOOTSTRAP 8 2015-05-21 "Devuan Project" "Devuan GNU/Linux manual"
.SH NAME
debootstrap \- Bootstrap a basic Devuan system
.SH SYNOPSIS
.B debootstrap
.RB [ OPTION\&.\&.\&. ]
.I SUITE TARGET
.RI [ MIRROR
.RI [ SCRIPT ]]
.B debootstrap
.RB [ OPTION\&.\&.\&. ]
\-\-second\-stage
.SH DESCRIPTION
.B debootstrap
bootstraps a basic Devuan system of
.I SUITE
into
.I TARGET
from
.I MIRROR
by running
.IR SCRIPT .
.I MIRROR
can be an http:// or https:// URL, a file:/// URL, or an ssh:/// URL.
.PP
The
.I SUITE
may be a release code name (e.g. ceres, ascii, jessie)
or a symbolic name (e.g. unstable, testing, stable, oldstable).
.PP
Notice that file:/ URLs are translated to file:/// (correct scheme as
described in RFC1738 for local filenames), and file:// will \fBnot\fR work.
ssh://USER@HOST/PATH URLs are retrieved using
.BR scp ;
use of
.B ssh\-agent
or similar is strongly recommended.
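.PP
For example, to bootstrap from a mirror reached over ssh (the host and path
shown here are purely illustrative):
.PP
debootstrap ascii ./ascii-chroot ssh://user@mirrorhost/srv/mirror/devuan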
.PP
\fBDebootstrap\fR can be used to install Devuan on a system without using an
installation disk, and can also be used to run a different Devuan flavor in a
\fBchroot\fR environment.
This way you can create a full (minimal) Devuan installation which
can be used for testing purposes (see the \fBEXAMPLES\fR section).
If you are looking for a chroot system to build packages please take a look at
\fBpbuilder\fR.
.SH "OPTIONS"
.PP
.IP "\fB\-\-arch=ARCH\fP"
Set the target architecture (use if dpkg isn't installed).
See also \-\-foreign.
.IP
.IP "\fB\-\-include=alpha,beta\fP"
Comma-separated list of packages which will be added to the download and
extract lists.
.IP
.IP "\fB\-\-exclude=alpha,beta\fP"
Comma-separated list of packages which will be removed from the download and
extract lists.
WARNING: you can, and probably will, exclude essential packages; be careful
when using this option.
.IP
.IP "\fB\-\-components=alpha,beta\fP"
Use packages from the listed components of the archive.
.IP
.IP "\fB\-\-no\-resolve\-deps\fP"
By default, debootstrap will attempt to automatically resolve any missing
dependencies, warning if any are found.
Note that this is not complete dependency resolution in the sense of dpkg
or apt, and that it is far better to specify the entire base system than to
rely on this option.
With this option set, this behaviour is disabled.
.IP
.IP "\fB\-\-variant=minbase|buildd|fakechroot\fP"
Name of the bootstrap script variant to use.
Currently, the variants supported are minbase, which only includes
essential packages and apt; buildd, which installs the build-essential
packages into
.IR TARGET ;
and fakechroot, which installs the packages without root privileges.
The default, with no \fB\-\-variant=X\fP argument, is to create a base
Devuan installation in
.IR TARGET .
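See the EXAMPLES section below for a \fB\-\-variant=minbase\fP example.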
.IP
.IP "\fB\-\-merged-usr\fP"
Create /{bin,sbin,lib}/ symlinks pointing to their counterparts in /usr/.
.IP
.IP "\fB\-\-no-merged-usr\fP"
Do not create /{bin,sbin,lib}/ symlinks pointing to their counterparts in /usr/.
(Default.)
.IP
.IP "\fB\-\-keyring=KEYRING\fP"
Override the default keyring for the distribution being bootstrapped,
and use
.IR KEYRING
to check signatures of retrieved Release files.
.IP
.IP "\fB\-\-no-check-gpg\fP"
Disables checking gpg signatures of retrieved Release files.
.IP
.IP "\fB\-\-force-check-gpg\fP"
Forces checking Release file signatures, disabling automatic fallback to
HTTPS in case of a missing keyring. Incompatible with the previous option.
.IP
.IP "\fB\-\-verbose\fP"
Produce more info about downloading.
.IP
.IP "\fB\-\-print\-debs\fP"
Print the packages to be installed, and exit.
Note that a TARGET directory must be specified so debootstrap can
download Packages files to determine which packages should be installed,
and to resolve dependencies.
The TARGET directory will be deleted unless \-\-keep\-debootstrap\-dir
is specified.
.IP
.IP "\fB\-\-download\-only\fP"
Download packages, but don't perform installation.
.IP
.IP "\fB\-\-foreign\fP"
Do the initial unpack phase of bootstrapping only, for example if the
target architecture does not match the host architecture.
A copy of debootstrap sufficient for completing the bootstrap process
will be installed as /debootstrap/debootstrap in the target filesystem.
You can run it with the \fB\-\-second\-stage\fP option to complete the
bootstrapping process.
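See the EXAMPLES section below for a two\-stage example.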
.IP
.IP "\fB\-\-second\-stage\fP"
Complete the bootstrapping process.
Other arguments are generally not needed.
.IP
.IP "\fB\-\-second\-stage\-target=DIR\fP"
Run the second stage in a subdirectory instead of the root directory (can be
used to create a foreign chroot; requires \-\-second\-stage).
.IP
.IP "\fB\-\-keep\-debootstrap\-dir\fP"
Don't delete the /debootstrap directory in the target after completing the
installation.
.IP
.IP "\fB\-\-unpack\-tarball=FILE\fP"
Acquire .debs from tarball FILE instead of downloading via http.
.IP
.IP "\fB\-\-make\-tarball=FILE\fP"
Instead of bootstrapping, make a tarball (written to FILE) of the downloaded
packages.
The resulting tarball may be passed to a later
.BR \-\-unpack\-tarball .
.IP
.IP "\fB\-\-debian\-installer\fP"
Used for internal purposes by the debian-installer.
.IP
.IP "\fB\-\-extractor=TYPE\fP"
Override automatic .deb extractor selection to
.IR TYPE .
Supported extractors are: dpkg-deb and ar.
.IP
.IP "\fB\-\-no\-check\-certificate\fP"
Do not check certificates against the certificate authorities.
.IP
.IP "\fB\-\-certificate=FILE\fP"
Use the client certificate stored in FILE (PEM format).
.IP
.IP "\fB\-\-private\-key=FILE\fP"
Read the private key from FILE.
.SH EXAMPLES
.
.PP
To set up an \fIascii\fR system:
.PP
debootstrap ascii ./ascii-chroot http://packages.devuan.org/merged
.PP
debootstrap ascii ./ascii-chroot file:///LOCAL_MIRROR/devuan
.PP
Full process to create a complete Devuan installation of \fIceres\fR (unstable)
in a chroot:
.PP
main # debootstrap ceres ceres-root http://packages.devuan.org/merged
[ ... watch it download the whole system ]
main # echo "proc ceres-root/proc proc defaults 0 0" >> /etc/fstab
main # mount proc ceres-root/proc -t proc
main # echo "sysfs ceres-root/sys sysfs defaults 0 0" >> /etc/fstab
main # mount sysfs ceres-root/sys -t sysfs
main # cp /etc/hosts ceres-root/etc/hosts
main # chroot ceres-root /bin/bash
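.PP
To create a minimal \fIascii\fR chroot containing only the essential packages
and apt (mirror as above; the directory names are illustrative):
.PP
debootstrap \-\-variant=minbase ascii ./ascii-minbase http://packages.devuan.org/merged
.PP
To bootstrap for a foreign architecture in two stages (armhf is only an
example; the second stage is normally run after booting into, or emulating,
the target architecture):
.PP
main # debootstrap \-\-arch=armhf \-\-foreign ascii ./armhf-chroot http://packages.devuan.org/merged
.PP
target # chroot ./armhf-chroot /debootstrap/debootstrap \-\-second\-stage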
.SH AUTHOR
This manpage was originally written by Anthony Towns <ajt@debian.org>
and Matt Kraai <kraai@debian.org> for Debian.
It was re-worked for Devuan by Daniel Reurich <daniel@centurion.net.nz>.

File diff suppressed because it is too large.


@ -1,202 +0,0 @@
mirror_style release
download_style apt
finddebs_style from-indices
variants - buildd fakechroot minbase
keyring /usr/share/keyrings/tanglu-archive-keyring.gpg
default_mirror http://archive.tanglu.org/tanglu
if doing_variant fakechroot; then
test "$FAKECHROOT" = "true" || error 1 FAKECHROOTREQ "This variant requires fakechroot environment to be started"
fi
case $ARCH in
alpha|ia64) LIBC="libc6.1" ;;
kfreebsd-*) LIBC="libc0.1" ;;
hurd-*) LIBC="libc0.3" ;;
*) LIBC="libc6" ;;
esac
work_out_debs () {
required="$(get_debs Priority: required)"
if doing_variant - || doing_variant fakechroot; then
#required="$required $(get_debs Priority: important)"
# ^^ should be getting debconf here somehow maybe
base="$(get_debs Priority: important)"
# we want the Tanglu minimal dependency set to be installed
base="$base tanglu-minimal"
elif doing_variant buildd; then
base="apt build-essential"
elif doing_variant minbase; then
base="apt"
fi
if doing_variant fakechroot; then
# ldd.fake needs binutils
required="$required binutils"
fi
case $MIRRORS in
https://*)
base="$base apt-transport-https ca-certificates"
;;
esac
}
first_stage_install () {
extract $required
mkdir -p "$TARGET/var/lib/dpkg"
: >"$TARGET/var/lib/dpkg/status"
: >"$TARGET/var/lib/dpkg/available"
setup_etc
if [ ! -e "$TARGET/etc/fstab" ]; then
echo '# UNCONFIGURED FSTAB FOR BASE SYSTEM' > "$TARGET/etc/fstab"
chown 0:0 "$TARGET/etc/fstab"; chmod 644 "$TARGET/etc/fstab"
fi
setup_devices
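# x_feign_install records a package as installed in dpkg's status file
# without running dpkg, so that later dpkg invocations inside the chroot
# treat it as already present.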
x_feign_install () {
local pkg="$1"
local deb="$(debfor $pkg)"
local ver="$(extract_deb_field "$TARGET/$deb" Version)"
mkdir -p "$TARGET/var/lib/dpkg/info"
echo \
"Package: $pkg
Version: $ver
Maintainer: unknown
Status: install ok installed" >> "$TARGET/var/lib/dpkg/status"
touch "$TARGET/var/lib/dpkg/info/${pkg}.list"
}
x_feign_install dpkg
}
second_stage_install () {
setup_dynamic_devices
x_core_install () {
smallyes '' | in_target dpkg --force-depends --install $(debfor "$@")
}
p () {
baseprog="$(($baseprog + ${1:-1}))"
}
if doing_variant fakechroot; then
setup_proc_fakechroot
else
setup_proc
in_target /sbin/ldconfig
fi
DEBIAN_FRONTEND=noninteractive
DEBCONF_NONINTERACTIVE_SEEN=true
export DEBIAN_FRONTEND DEBCONF_NONINTERACTIVE_SEEN
baseprog=0
bases=7
p; progress $baseprog $bases INSTCORE "Installing core packages" #1
info INSTCORE "Installing core packages..."
p; progress $baseprog $bases INSTCORE "Installing core packages" #2
ln -sf mawk "$TARGET/usr/bin/awk"
x_core_install base-passwd
x_core_install base-files
p; progress $baseprog $bases INSTCORE "Installing core packages" #3
x_core_install dpkg
if [ ! -e "$TARGET/etc/localtime" ]; then
ln -sf /usr/share/zoneinfo/UTC "$TARGET/etc/localtime"
fi
if doing_variant fakechroot; then
install_fakechroot_tools
fi
p; progress $baseprog $bases INSTCORE "Installing core packages" #4
x_core_install $LIBC
p; progress $baseprog $bases INSTCORE "Installing core packages" #5
x_core_install perl-base
p; progress $baseprog $bases INSTCORE "Installing core packages" #6
rm "$TARGET/usr/bin/awk"
x_core_install mawk
p; progress $baseprog $bases INSTCORE "Installing core packages" #7
if doing_variant -; then
x_core_install debconf
fi
baseprog=0
bases=$(set -- $required; echo $#)
info UNPACKREQ "Unpacking required packages..."
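# fd 7 is a duplicate of the original stdout so dpkg's normal output still
# reaches the user, while fd 8 carries the machine-readable --status-fd
# stream through the pipe into dpkg_progress.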
exec 7>&1
smallyes '' |
(repeatn 5 in_target_failmsg UNPACK_REQ_FAIL_FIVE "Failure while unpacking required packages. This will be attempted up to five times." "" \
dpkg --status-fd 8 --force-depends --unpack $(debfor $required) 8>&1 1>&7 || echo EXITCODE $?) |
dpkg_progress $baseprog $bases UNPACKREQ "Unpacking required packages" UNPACKING
info CONFREQ "Configuring required packages..."
mv "$TARGET/sbin/start-stop-daemon" "$TARGET/sbin/start-stop-daemon.REAL"
echo \
"#!/bin/sh
echo
echo \"Warning: Fake start-stop-daemon called, doing nothing\"" > "$TARGET/sbin/start-stop-daemon"
chmod 755 "$TARGET/sbin/start-stop-daemon"
setup_dselect_method apt
smallyes '' |
(in_target_failmsg CONF_REQ_FAIL "Failure while configuring required packages." "" \
dpkg --status-fd 8 --configure --pending --force-configure-any --force-depends 8>&1 1>&7 || echo EXITCODE $?) |
dpkg_progress $baseprog $bases CONFREQ "Configuring required packages" CONFIGURING
baseprog=0
bases="$(set -- $base; echo $#)"
info UNPACKBASE "Unpacking the base system..."
setup_available $required $base
done_predeps=
while predep=$(get_next_predep); do
# We have to resolve dependencies of pre-dependencies manually because
# dpkg --predep-package doesn't handle this.
predep=$(without "$(without "$(resolve_deps $predep)" "$required")" "$done_predeps")
# XXX: progress is tricky due to how dpkg_progress works
# -- cjwatson 2009-07-29
p; smallyes '' |
in_target dpkg --force-overwrite --force-confold --skip-same-version --install $(debfor $predep)
base=$(without "$base" "$predep")
done_predeps="$done_predeps $predep"
done
smallyes '' |
(repeatn 5 in_target_failmsg INST_BASE_FAIL_FIVE "Failure while installing base packages. This will be re-attempted up to five times." "" \
dpkg --status-fd 8 --force-overwrite --force-confold --skip-same-version --unpack $(debfor $base) 8>&1 1>&7 || echo EXITCODE $?) |
dpkg_progress $baseprog $bases UNPACKBASE "Unpacking base system" UNPACKING
info CONFBASE "Configuring the base system..."
smallyes '' |
(repeatn 5 in_target_failmsg CONF_BASE_FAIL_FIVE "Failure while configuring base packages. This will be re-attempted up to five times." "" \
dpkg --status-fd 8 --force-confold --skip-same-version --configure -a 8>&1 1>&7 || echo EXITCODE $?) |
dpkg_progress $baseprog $bases CONFBASE "Configuring base system" CONFIGURING
mv "$TARGET/sbin/start-stop-daemon.REAL" "$TARGET/sbin/start-stop-daemon"
progress $bases $bases CONFBASE "Configuring base system"
info BASESUCCESS "Base system installed successfully."
}


@ -1,163 +0,0 @@
default_mirror http://old-releases.ubuntu.com/ubuntu
mirror_style release
download_style apt
finddebs_style from-indices
variants - buildd
case $ARCH in
alpha|ia64) LIBC="libc6.1" ;;
*) LIBC="libc6" ;;
esac
work_out_debs () {
required="$(get_debs Priority: required)"
if doing_variant -; then
#required="$required $(get_debs Priority: important)"
# ^^ should be getting debconf here somehow maybe
base="$(get_debs Priority: important)"
elif doing_variant buildd; then
# TODO: add Build-Essential: yes extraoverrides
#base="$(get_debs Build-Essential: yes)"
add () { if [ "$ARCH" = "$1" ]; then eval "$2=\"\$$2 $3\""; fi; }
base="apt binutils cpio cpp cpp-4.0 dpkg-dev g++ g++-4.0 gcc gcc-4.0 gcc-4.0-base ${LIBC}-dev libdb4.2 libgdbm3 libstdc++6 libstdc++6-4.0-dev linux-kernel-headers make patch perl perl-modules"
add ia64 base "libunwind7-dev"
add sparc base "lib64gcc1"
add sparc base "libc6-dev-sparc64"
add sparc base "libc6-sparc64"
fi
}
first_stage_install () {
extract $required
mkdir -p "$TARGET/var/lib/dpkg"
: >"$TARGET/var/lib/dpkg/status"
echo >"$TARGET/var/lib/dpkg/available"
setup_etc
if [ ! -e "$TARGET/etc/fstab" ]; then
echo '# UNCONFIGURED FSTAB FOR BASE SYSTEM' > "$TARGET/etc/fstab"
chown 0:0 "$TARGET/etc/fstab"; chmod 644 "$TARGET/etc/fstab"
fi
setup_devices
x_feign_install () {
local pkg="$1"
local deb="$(debfor $pkg)"
local ver="$(extract_deb_field "$TARGET/$deb" Version)"
mkdir -p "$TARGET/var/lib/dpkg/info"
echo \
"Package: $pkg
Version: $ver
Status: install ok installed" >> "$TARGET/var/lib/dpkg/status"
touch "$TARGET/var/lib/dpkg/info/${pkg}.list"
}
x_feign_install dpkg
}
second_stage_install () {
x_core_install () {
smallyes '' | in_target dpkg --force-depends --install $(debfor "$@")
}
p () {
baseprog="$(($baseprog + ${1:-1}))"
}
setup_proc
umount_on_exit /dev/.static/dev
umount_on_exit /dev
in_target /sbin/ldconfig
DEBIAN_FRONTEND=noninteractive
DEBCONF_NONINTERACTIVE_SEEN=true
export DEBIAN_FRONTEND DEBCONF_NONINTERACTIVE_SEEN
baseprog=0
bases=7
p; progress $baseprog $bases INSTCORE "Installing core packages" #1
info INSTCORE "Installing core packages..."
p; progress $baseprog $bases INSTCORE "Installing core packages" #2
ln -sf mawk "$TARGET/usr/bin/awk"
x_core_install base-files base-passwd
p; progress $baseprog $bases INSTCORE "Installing core packages" #3
x_core_install dpkg
if [ ! -e "$TARGET/etc/localtime" ]; then
ln -sf /usr/share/zoneinfo/UTC "$TARGET/etc/localtime"
fi
p; progress $baseprog $bases INSTCORE "Installing core packages" #4
x_core_install $LIBC
p; progress $baseprog $bases INSTCORE "Installing core packages" #5
x_core_install perl-base
p; progress $baseprog $bases INSTCORE "Installing core packages" #6
rm "$TARGET/usr/bin/awk"
x_core_install mawk
p; progress $baseprog $bases INSTCORE "Installing core packages" #7
if doing_variant -; then
x_core_install debconf
fi
baseprog=0
bases=$(set -- $required; echo $#)
info UNPACKREQ "Unpacking required packages..."
smallyes '' |
(repeatn 5 in_target_failmsg UNPACK_REQ_FAIL_FIVE "Failure while unpacking required packages. This will be attempted up to five times." "" \
dpkg --status-fd 8 --force-depends --unpack $(debfor $required) 8>&1 1>&7 |
dpkg_progress $baseprog $bases UNPACKREQ "Unpacking required packages" UNPACKING) 7>&1
info CONFREQ "Configuring required packages..."
mv "$TARGET/sbin/start-stop-daemon" "$TARGET/sbin/start-stop-daemon.REAL"
echo \
"#!/bin/sh
echo
echo \"Warning: Fake start-stop-daemon called, doing nothing\"" > "$TARGET/sbin/start-stop-daemon"
chmod 755 "$TARGET/sbin/start-stop-daemon"
setup_dselect_method apt
smallyes '' |
(in_target_failmsg CONF_REQ_FAIL "Failure while configuring required packages." "" \
dpkg --status-fd 8 --configure --pending --force-configure-any --force-depends 8>&1 1>&7 |
dpkg_progress $baseprog $bases CONFREQ "Configuring required packages" CONFIGURING) 7>&1
baseprog=0
bases="$(set -- $base; echo $#)"
info UNPACKBASE "Unpacking the base system..."
smallyes '' |
(repeatn 5 in_target_failmsg INST_BASE_FAIL_FIVE "Failure while installing base packages. This will be re-attempted up to five times." "" \
dpkg --status-fd 8 --force-auto-select --force-overwrite --force-confold --skip-same-version --unpack $(debfor $base) 8>&1 1>&7 |
dpkg_progress $baseprog $bases UNPACKBASE "Unpacking base system" UNPACKING) 7>&1
info CONFBASE "Configuring the base system..."
smallyes '' |
(repeatn 5 in_target_failmsg CONF_BASE_FAIL_FIVE "Failure while configuring base packages. This will be attempted 5 times." "" \
dpkg --status-fd 8 --force-confold --skip-same-version --configure -a 8>&1 1>&7 |
dpkg_progress $baseprog $bases CONFBASE "Configuring base system" CONFIGURING) 7>&1
mv "$TARGET/sbin/start-stop-daemon.REAL" "$TARGET/sbin/start-stop-daemon"
progress $bases $bases CONFBASE "Configuring base system"
info BASESUCCESS "Base system installed successfully."
}


@ -1,218 +0,0 @@
mirror_style release
download_style apt
finddebs_style from-indices
variants - buildd fakechroot minbase scratchbox
keyring /usr/share/keyrings/devuan-archive-keyring.gpg
if doing_variant fakechroot; then
test "$FAKECHROOT" = "true" || error 1 FAKECHROOTREQ "This variant requires fakechroot environment to be started"
fi
case $ARCH in
alpha|ia64) LIBC="libc6.1" ;;
kfreebsd-*) LIBC="libc0.1" ;;
hurd-*) LIBC="libc0.3" ;;
*) LIBC="libc6" ;;
esac
work_out_debs () {
required="$(get_debs Priority: required)"
devuan_required="devuan-keyring devuan-baseconf sysvinit-core"
devuan_remove="systemd systemd-sysv"
if doing_variant - || doing_variant fakechroot; then
#required="$required $(get_debs Priority: important)"
# ^^ should be getting debconf here somehow maybe
base="$(get_debs Priority: important) $devuan_required"
elif doing_variant buildd || doing_variant scratchbox; then
base="apt build-essential $devuan_required"
elif doing_variant minbase; then
base="apt $devuan_required"
fi
if doing_variant fakechroot; then
# ldd.fake needs binutils
required="$required binutils"
fi
case $MIRRORS in
https://*)
base="$base apt-transport-https ca-certificates"
;;
esac
#base=$(without "$base" "$devuan_remove")
}
first_stage_install () {
case "$CODENAME" in
jessie|jessie-kfreebsd) ;;
*)
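# For suites newer than jessie, pass -k (keep existing files) to tar when
# extracting .debs so the merged-/usr symlinks created by setup_merged_usr
# are not overwritten.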
EXTRACT_DEB_TAR_OPTIONS="$EXTRACT_DEB_TAR_OPTIONS -k"
setup_merged_usr
;;
esac
extract $required
mkdir -p "$TARGET/var/lib/dpkg"
: >"$TARGET/var/lib/dpkg/status"
: >"$TARGET/var/lib/dpkg/available"
setup_etc
if [ ! -e "$TARGET/etc/fstab" ]; then
echo '# UNCONFIGURED FSTAB FOR BASE SYSTEM' > "$TARGET/etc/fstab"
chown 0:0 "$TARGET/etc/fstab"; chmod 644 "$TARGET/etc/fstab"
fi
setup_devices
}
second_stage_install () {
setup_dynamic_devices
x_feign_install () {
local pkg="$1"
local deb="$(debfor $pkg)"
local ver="$(in_target dpkg-deb -f "$deb" Version)"
mkdir -p "$TARGET/var/lib/dpkg/info"
echo \
"Package: $pkg
Version: $ver
Maintainer: unknown
Status: install ok installed" >> "$TARGET/var/lib/dpkg/status"
touch "$TARGET/var/lib/dpkg/info/${pkg}.list"
}
x_feign_install dpkg
x_core_install () {
smallyes '' | in_target dpkg --force-depends --install $(debfor "$@")
}
p () {
baseprog="$(($baseprog + ${1:-1}))"
}
if doing_variant fakechroot; then
setup_proc_fakechroot
elif doing_variant scratchbox; then
true
else
setup_proc
in_target /sbin/ldconfig
fi
DEBIAN_FRONTEND=noninteractive
DEBCONF_NONINTERACTIVE_SEEN=true
export DEBIAN_FRONTEND DEBCONF_NONINTERACTIVE_SEEN
baseprog=0
bases=7
p; progress $baseprog $bases INSTCORE "Installing core packages" #1
info INSTCORE "Installing core packages..."
p; progress $baseprog $bases INSTCORE "Installing core packages" #2
ln -sf mawk "$TARGET/usr/bin/awk"
x_core_install base-passwd
x_core_install base-files
p; progress $baseprog $bases INSTCORE "Installing core packages" #3
x_core_install dpkg
if [ ! -e "$TARGET/etc/localtime" ]; then
ln -sf /usr/share/zoneinfo/UTC "$TARGET/etc/localtime"
fi
if doing_variant fakechroot; then
install_fakechroot_tools
fi
p; progress $baseprog $bases INSTCORE "Installing core packages" #4
x_core_install $LIBC
p; progress $baseprog $bases INSTCORE "Installing core packages" #5
x_core_install perl-base
p; progress $baseprog $bases INSTCORE "Installing core packages" #6
rm "$TARGET/usr/bin/awk"
x_core_install mawk
p; progress $baseprog $bases INSTCORE "Installing core packages" #7
if doing_variant -; then
x_core_install debconf
fi
baseprog=0
bases=$(set -- $required; echo $#)
info UNPACKREQ "Unpacking required packages..."
exec 7>&1
smallyes '' |
(repeatn 5 in_target_failmsg UNPACK_REQ_FAIL_FIVE "Failure while unpacking required packages. This will be attempted up to five times." "" \
dpkg --status-fd 8 --force-depends --unpack $(debfor $required) 8>&1 1>&7 || echo EXITCODE $?) |
dpkg_progress $baseprog $bases UNPACKREQ "Unpacking required packages" UNPACKING
info CONFREQ "Configuring required packages..."
echo \
"#!/bin/sh
exit 101" > "$TARGET/usr/sbin/policy-rc.d"
chmod 755 "$TARGET/usr/sbin/policy-rc.d"
mv "$TARGET/sbin/start-stop-daemon" "$TARGET/sbin/start-stop-daemon.REAL"
echo \
"#!/bin/sh
echo
echo \"Warning: Fake start-stop-daemon called, doing nothing\"" > "$TARGET/sbin/start-stop-daemon"
chmod 755 "$TARGET/sbin/start-stop-daemon"
setup_dselect_method apt
smallyes '' |
(in_target_failmsg CONF_REQ_FAIL "Failure while configuring required packages." "" \
dpkg --status-fd 8 --configure --pending --force-configure-any --force-depends 8>&1 1>&7 || echo EXITCODE $?) |
dpkg_progress $baseprog $bases CONFREQ "Configuring required packages" CONFIGURING
baseprog=0
bases="$(set -- $base; echo $#)"
info UNPACKBASE "Unpacking the base system..."
setup_available $required $base
done_predeps=
while predep=$(get_next_predep); do
# We have to resolve dependencies of pre-dependencies manually because
# dpkg --predep-package doesn't handle this.
predep=$(without "$(without "$(resolve_deps $predep)" "$required")" "$done_predeps")
# XXX: progress is tricky due to how dpkg_progress works
# -- cjwatson 2009-07-29
p; smallyes '' |
in_target dpkg --force-overwrite --force-confold --skip-same-version --install $(debfor $predep)
base=$(without "$base" "$predep")
done_predeps="$done_predeps $predep"
done
if [ -n "$base" ]; then
smallyes '' |
(repeatn 5 in_target_failmsg INST_BASE_FAIL_FIVE "Failure while installing base packages. This will be re-attempted up to five times." "" \
dpkg --status-fd 8 --force-overwrite --force-confold --skip-same-version --unpack $(debfor $base) 8>&1 1>&7 || echo EXITCODE $?) |
dpkg_progress $baseprog $bases UNPACKBASE "Unpacking base system" UNPACKING
info CONFBASE "Configuring the base system..."
smallyes '' |
(repeatn 5 in_target_failmsg CONF_BASE_FAIL_FIVE "Failure while configuring base packages. This will be re-attempted up to five times." "" \
dpkg --status-fd 8 --force-confold --skip-same-version --configure -a 8>&1 1>&7 || echo EXITCODE $?) |
dpkg_progress $baseprog $bases CONFBASE "Configuring base system" CONFIGURING
fi
mv "$TARGET/sbin/start-stop-daemon.REAL" "$TARGET/sbin/start-stop-daemon"
rm -f "$TARGET/usr/sbin/policy-rc.d"
progress $bases $bases CONFBASE "Configuring base system"
info BASESUCCESS "Base system installed successfully."
}


@ -1,168 +0,0 @@
case $ARCH in
amd64|i386|powerpc|sparc)
default_mirror http://archive.ubuntu.com/ubuntu
;;
*)
default_mirror http://ports.ubuntu.com/ubuntu-ports
;;
esac
mirror_style release
download_style apt
finddebs_style from-indices
variants - buildd
case $ARCH in
alpha|ia64) LIBC="libc6.1" ;;
*) LIBC="libc6" ;;
esac
work_out_debs () {
required="$(get_debs Priority: required)"
if doing_variant -; then
#required="$required $(get_debs Priority: important)"
# ^^ should be getting debconf here somehow maybe
base="$(get_debs Priority: important)"
elif doing_variant buildd; then
# TODO: add Build-Essential: yes extraoverrides
#base="$(get_debs Build-Essential: yes)"
add () { if [ "$ARCH" = "$1" ]; then eval "$2=\"\$$2 $3\""; fi; }
base="apt binutils cpio cpp cpp-4.0 dpkg-dev g++ g++-4.0 gcc gcc-4.0 ${LIBC}-dev libgdbm3 libstdc++6 libstdc++6-4.0-dev linux-kernel-headers make patch perl perl-modules"
add ia64 base "libunwind7-dev"
add sparc base "lib64gcc1"
add sparc base "libc6-dev-sparc64"
add sparc base "libc6-sparc64"
fi
}
first_stage_install () {
extract $required
mkdir -p "$TARGET/var/lib/dpkg"
: >"$TARGET/var/lib/dpkg/status"
: >"$TARGET/var/lib/dpkg/available"
setup_etc
if [ ! -e "$TARGET/etc/fstab" ]; then
echo '# UNCONFIGURED FSTAB FOR BASE SYSTEM' > "$TARGET/etc/fstab"
chown 0:0 "$TARGET/etc/fstab"; chmod 644 "$TARGET/etc/fstab"
fi
setup_devices
x_feign_install () {
local pkg="$1"
local deb="$(debfor $pkg)"
local ver="$(extract_deb_field "$TARGET/$deb" Version)"
mkdir -p "$TARGET/var/lib/dpkg/info"
echo \
"Package: $pkg
Version: $ver
Status: install ok installed" >> "$TARGET/var/lib/dpkg/status"
touch "$TARGET/var/lib/dpkg/info/${pkg}.list"
}
x_feign_install dpkg
}
second_stage_install () {
x_core_install () {
smallyes '' | in_target dpkg --force-depends --install $(debfor "$@")
}
p () {
baseprog="$(($baseprog + ${1:-1}))"
}
setup_proc
in_target /sbin/ldconfig
DEBIAN_FRONTEND=noninteractive
DEBCONF_NONINTERACTIVE_SEEN=true
export DEBIAN_FRONTEND DEBCONF_NONINTERACTIVE_SEEN
baseprog=0
bases=7
p; progress $baseprog $bases INSTCORE "Installing core packages" #1
info INSTCORE "Installing core packages..."
p; progress $baseprog $bases INSTCORE "Installing core packages" #2
ln -sf mawk "$TARGET/usr/bin/awk"
x_core_install base-files base-passwd
p; progress $baseprog $bases INSTCORE "Installing core packages" #3
x_core_install dpkg
if [ ! -e "$TARGET/etc/localtime" ]; then
ln -sf /usr/share/zoneinfo/UTC "$TARGET/etc/localtime"
fi
p; progress $baseprog $bases INSTCORE "Installing core packages" #4
x_core_install $LIBC
p; progress $baseprog $bases INSTCORE "Installing core packages" #5
x_core_install perl-base
p; progress $baseprog $bases INSTCORE "Installing core packages" #6
rm "$TARGET/usr/bin/awk"
x_core_install mawk
p; progress $baseprog $bases INSTCORE "Installing core packages" #7
if doing_variant -; then
x_core_install debconf
fi
baseprog=0
bases=$(set -- $required; echo $#)
info UNPACKREQ "Unpacking required packages..."
smallyes '' |
(repeatn 5 in_target_failmsg UNPACK_REQ_FAIL_FIVE "Failure while unpacking required packages. This will be attempted up to five times." "" \
dpkg --status-fd 8 --force-depends --unpack $(debfor $required) 8>&1 1>&7 |
dpkg_progress $baseprog $bases UNPACKREQ "Unpacking required packages" UNPACKING) 7>&1
info CONFREQ "Configuring required packages..."
mv "$TARGET/sbin/start-stop-daemon" "$TARGET/sbin/start-stop-daemon.REAL"
echo \
"#!/bin/sh
echo
echo \"Warning: Fake start-stop-daemon called, doing nothing\"" > "$TARGET/sbin/start-stop-daemon"
chmod 755 "$TARGET/sbin/start-stop-daemon"
setup_dselect_method apt
smallyes '' |
(in_target_failmsg CONF_REQ_FAIL "Failure while configuring required packages." "" \
dpkg --status-fd 8 --configure --pending --force-configure-any --force-depends 8>&1 1>&7 |
dpkg_progress $baseprog $bases CONFREQ "Configuring required packages" CONFIGURING) 7>&1
baseprog=0
bases="$(set -- $base; echo $#)"
info UNPACKBASE "Unpacking the base system..."
smallyes '' |
(repeatn 5 in_target_failmsg INST_BASE_FAIL_FIVE "Failure while installing base packages. This will be re-attempted up to five times." "" \
dpkg --status-fd 8 --force-auto-select --force-overwrite --force-confold --skip-same-version --unpack $(debfor $base) 8>&1 1>&7 |
dpkg_progress $baseprog $bases UNPACKBASE "Unpacking base system" UNPACKING) 7>&1
info CONFBASE "Configuring the base system..."
smallyes '' |
(repeatn 5 in_target_failmsg CONF_BASE_FAIL_FIVE "Failure while configuring base packages. This will be attempted 5 times." "" \
dpkg --status-fd 8 --force-confold --skip-same-version --configure -a 8>&1 1>&7 |
dpkg_progress $baseprog $bases CONFBASE "Configuring base system" CONFIGURING) 7>&1
mv "$TARGET/sbin/start-stop-daemon.REAL" "$TARGET/sbin/start-stop-daemon"
progress $bases $bases CONFBASE "Configuring base system"
info BASESUCCESS "Base system installed successfully."
}

Some files were not shown because too many files have changed in this diff.