Solaris Resource Manager 1.3 System Administration Guide

Initialization Script

The following startup script is supplied with the system as /etc/init.d/init.srm. It is executed with an argument of start when the system enters run-level 2 or 3 (multi-user mode), and with an argument of stop at system shutdown.
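
On a standard Solaris system the script is invoked through links in the run-control directories, following the usual SVR4 rc convention. The links below are only a sketch; the sequence numbers and link names are assumptions, not necessarily those installed by the Solaris Resource Manager packages.

# Illustrative rc links (names and sequence numbers are assumptions)
ln /etc/init.d/init.srm /etc/rc2.d/S90srm	# runs "init.srm start" on entry to run-level 2
ln /etc/init.d/init.srm /etc/rc0.d/K10srm	# runs "init.srm stop" at shutdown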

#!/bin/sh
#
# Copyright (c) 1998-1999 by Sun Microsystems, Inc.
# All rights reserved.
#
# Copyright 1995-1997 Softway Pty. Ltd.
#
# Start/stop Solaris Resource Manager v1.1
#
#ident  "@(#)init.srm 1.24 99/02/10 SMI"

#######################################################################
# Default values.

DATADIR=/var/srm
ShareDb=$DATADIR/srmDB
LimdaemonOptions=
ChargeOptionsOn="limits=y:share=y:adjgroups=y:limshare=y"
ChargeOptionsOff="limits=n:share=n:adjgroups=n:limshare=n"
DaemonLnode=daemon
LostLnode=srmlost
IdleLnode=srmidle
OtherLnode=srmother

#######################################################################

# ECHO=echo	# For a verbose startup and shutdown
ECHO=:		# For a quiet startup and shutdown

SRMDIR=/usr/srm
SRMBIN=$SRMDIR/bin
SRMSBIN=$SRMDIR/sbin
SRMLIB=$SRMDIR/lib
ETCSRM=/etc/srm

PATH=/sbin:/usr/sbin:/bin:$PATH:$SRMSBIN:$SRMBIN:$SRMLIB
export PATH
case "$1" in
'start')
	if [ ! -x $SRMSBIN/srmadm ]; then 
		echo "Solaris Resource Manager *not* installed." \
			"Missing srmadm command."
		exit
	fi

	# Only bother if sched/SHR is loaded.
	if [ `$SRMSBIN/srmadm` != yes ]
	then
		#
		# Usually this is because /etc/system doesn't have the usual
		#	set initclass="SHR"
		# or at least a set extraclass="SHR"
		#
		echo "Solaris Resource Manager *not* loaded."
		exit
	else
		echo "Enabling Solaris Resource Manager"
		if [ `$SRMSBIN/srmadm show fileopen` = yes ]; then
			echo "SRM database file already open - stopping first."
			limdaemon -k
			sleep 2
			srmadm set $ChargeOptionsOff
			sync
			srmadm set fileopen=n
			$ECHO "SRM inactive"
		fi
		$ECHO "Starting SRM..."
	fi

	# Check the limconf file.
	if [ ! -s $ETCSRM/limconf ]; then
		echo "SRM - file $ETCSRM/limconf is missing " >&2
		echo "SRM not started."
		exit 1
	fi

	if [ ! -f "$ShareDb" ]; then
		echo "SRM database '$ShareDb' not present - " \
			"creating empty database"
		if [ ! -d "$DATADIR" ]; then
			mkdir "$DATADIR"
			chmod 400 "$DATADIR"
			chown root "$DATADIR"
			chgrp root "$DATADIR"
		fi
		touch "$ShareDb" ||
		{
			echo "Failed to create '$ShareDb'" >&2
			echo "SRM not started"
			exit 1
		}
		chmod 400 "$ShareDb"
		chown root "$ShareDb"
		chgrp root "$ShareDb"
	fi

	CreateLnodes=0
	if [ ! -s "$ShareDb" ]; then
		$ECHO "SRM Warning: Using empty database" >&2
		CreateLnodes=1
	fi

	$ECHO "SRM starting ... \c"

	# Open Lnode file.
	srmadm set -f "$ShareDb" fileopen=y

	if [ $? != 0 ]; then
		echo
		echo "srmadm set -f $ShareDb failed" >&2
		echo "SRM not started"
		exit 1
	fi

	# Set SRM global options.
	srmadm set $ChargeOptionsOn
	if [ $? != 0 ]; then
		echo
		echo "srmadm set $ChargeOptionsOn failed" >&2
		echo "SRM not completely enabled"
		exit 1
	fi

	# Create if needed the daemon lnode.
	liminfo "$DaemonLnode" 2>/dev/null | \
		grep "^Login name:  *$DaemonLnode " >/dev/null 2>&1
	if [ $? -ne 0 ]; then
		# If the daemon lnode does not exist, create one.
		limadm set cpu.shares=1 "$DaemonLnode" 2>/dev/null
		limadm set sgroup=root "$DaemonLnode" 2>/dev/null
	fi

	# Create if needed the other lnode.
	liminfo "$OtherLnode" 2>/dev/null | \
		grep "^Login name:  *$OtherLnode " >/dev/null 2>&1
	if [ $? -ne 0 ]; then
		# If "other" sgroup exists but has no lnode, create one.
		limadm set cpu.shares=1 "$OtherLnode" 2>/dev/null
		limadm set sgroup=root "$OtherLnode" 2>/dev/null
	fi

	# Create if needed, and set the lost lnode.
	if [ x"$LostLnode" != x ]; then
        	liminfo "$LostLnode" 2>/dev/null | \
			grep "^Login name:  *$LostLnode " >/dev/null 2>&1
		if [ $? -ne 0 ]; then
			limadm set cpu.shares=1 "$LostLnode"
			limadm set sgroup=root "$LostLnode"
		fi

		srmadm set lost="$LostLnode" ||
		$ECHO "SRM - Warning: No user '$LostLnode' for lost lnode"
	fi

	# Create if needed, and set the idle lnode.
	if [ x"$IdleLnode" != x ]; then
        	liminfo "$IdleLnode" 2>/dev/null | \
			grep "^Login name:  *$IdleLnode " >/dev/null 2>&1
		if [ $? -ne 0 ]; then
			limadm set cpu.shares=0 "$IdleLnode"
			limadm set sgroup=root "$IdleLnode"
		fi
		srmadm set idle="$IdleLnode" ||
		$ECHO "SRM - Warning: No user '$IdleLnode' for idle lnode"
	fi

	# If creating SRM database, set up existing users.
	if [ "$CreateLnodes" -eq 1 ]; then
		echo "SRM - creating user lnodes; may take a while"
	# We now want to catch any other users which were not found
	# on the filesystems.  First we need to decide what the maximum
	# uid value we will create an l-node entry for in the database.
	# We choose less than the uid for 'nobody' so that we can try
	# and minimise the apparent size of the database (which is a sparse
	# file).  If the user 'nobody' does not exist then we just have
	# to take our chances with using all possible uid values.
	# Unfortunately all this means that there are certain circumstances
	# where not all users will be taken into account.

	MaxUID=`awk -F: "\\$1==\"nobody\" { print \\$3 - 100 }" /etc/passwd`
		if [ $? -eq 0 -a x"$MaxUID" != x ] ; then
			Cond="uid >= 0 && uid < $MaxUID && !flag.real"
		else
			Cond="uid >= 0 && !flag.real"
		fi
		UIDS=`limreport "$Cond" '%d\n' uid | wc -l`
		if [ $UIDS -gt 0 ]; then
			$ECHO "$UIDS other lnodes to be created" \
				"due to passwd entries"
			CMDS="limadm set cpu.shares=1:sgroup=$OtherLnode"
			limreport "$Cond" "$CMDS %d\necho ' uid %7d\r\c'\n" \
				uid uid | sh
			echo
		fi
	fi

	limdaemon $LimdaemonOptions

	echo "Solaris Resource Manager Enabled."
    ;;

'stop')
	# SRM shutdown should be done as late as possible before
	# filesystems are unmounted. 
	if [ -x $SRMSBIN/srmadm ] && $SRMSBIN/srmadm show fileopen > /dev/null
	then
		limdaemon -k
		sleep 2
		srmadm set $ChargeOptionsOff
		srmadm set fileopen=n
		sync
		$ECHO "Solaris Resource Manager Disabled" 
	fi
	;;

*)
	echo "Usage: $0 {start|stop}"
	;;
esac
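
Before the start action can enable SRM, the SHR scheduling class must already be loaded. As the comments in the start section note, this is normally arranged with an entry in /etc/system; the lines below are a sketch based on those comments, and only one of the two settings would be used on a given system.

* /etc/system: make SHR the default scheduling class ...
set initclass="SHR"
* ... or load SHR as an additional class only
set extraclass="SHR"

The script can also be run by hand, for example to restart SRM after editing the default values at the top of the file:

/etc/init.d/init.srm stop
/etc/init.d/init.srm start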

Default 'no lnode' Script

This script creates the new user's lnode in the default scheduling group (other, if such a user exists in the password map; otherwise root) and mails the system administrator a reminder to move the new lnode into the appropriate place in the scheduling hierarchy.
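
On receiving the reminder, the administrator moves the lnode out of the default group with limadm, in the same way the startup script above assigns scheduling groups. In the sketch below, jsmith and engineering are hypothetical user and group lnode names; substitute the names appropriate to your hierarchy.

limadm set sgroup=engineering jsmith	# attach the new lnode to its proper scheduling group
limadm set cpu.shares=10 jsmith		# and give it an appropriate number of shares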

#!/bin/sh
#
#ident  "@(#)nolnode.sh 1.10 99/05/07 SMI"
#
# Copyright (c) 1998-1999 by Sun Microsystems, Inc.
# All rights reserved.
#
# Copyright 1995-1997 Softway Pty. Ltd.
#
# A script that is called by the PAM module to create an lnode
#

PATH=/usr/srm/sbin:/usr/srm/bin:/sbin:/bin export PATH
LOCALE=C export LOCALE
if [ "$DEBUG" = "true" ]
then
    exec >> /tmp/nolnodelog 2>&1
    echo
    date
    echo "Attempting to create lnode for $USER"
else
    exec > /dev/null  2>&1
fi

err=`limadm set -u cpu.shares=1 "$UID" 2>&1`
if [ $? -eq 0 ]
then
    SRM_GROUP=`liminfo -v $USER | grep '^sgroupname' | awk '{ print $2 }'`
    SRM_SHARE=`liminfo -v $USER | grep '^cpu.shares' | awk '{ print $2 }'`
    export SRM_GROUP SRM_SHARE
    cat <<-EOF | /usr/lib/sendmail root 
	Subject: New lnode created for "$USER"

	Remember to change scheduling group and shares for
	"$USER". Currently in group "$SRM_GROUP" with $SRM_SHARE share.

EOF
else
    cat <<-EOF | /usr/lib/sendmail root
	Subject: Could not create lnode for "$USER"

	after "$SERVICE" attempt on tty "$TTY", uid "$UID", 
	rhost "$RHOST",
	limadm said "$err"
EOF
    exit 1 # deny access
fi
# permit access
exit 0
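
Because the script takes its input from environment variables set by the PAM module (USER, UID, SERVICE, TTY, and RHOST), it can also be exercised by hand for testing. The invocation below is only a sketch: the installed path of the script and the sample values are assumptions.

env DEBUG=true USER=jsmith UID=1001 SERVICE=login TTY=pts/3 RHOST=client1 \
	/bin/sh /usr/srm/lib/nolnode.sh
cat /tmp/nolnodelog	# with DEBUG=true the script appends its diagnostics here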