Background: #fff
Foreground: #000
PrimaryPale: #8cf
PrimaryLight: #18f
PrimaryMid: #04b
PrimaryDark: #014
SecondaryPale: #ffc
SecondaryLight: #fe8
SecondaryMid: #db4
SecondaryDark: #841
TertiaryPale: #eee
TertiaryLight: #ccc
TertiaryMid: #999
TertiaryDark: #666
Error: #f88
<!--{{{-->
<!-- EditTemplate: layout used when a tiddler is open for editing -->
<div class='toolbar' macro='toolbar [[ToolbarCommands::EditToolbar]]'></div>
<div class='title' macro='view title'></div>
<!-- editable title field -->
<div class='editor' macro='edit title'></div>
<div macro='annotations'></div>
<!-- editable body text, then tags plus the tag-prompt/tag-chooser footer -->
<div class='editor' macro='edit text'></div>
<div class='editor' macro='edit tags'></div><div class='editorFooter'><span macro='message views.editor.tagPrompt'></span><span macro='tagChooser excludeLists'></span></div>
<!--}}}-->
To get started with this blank [[TiddlyWiki]], you'll need to modify the following tiddlers:
* [[SiteTitle]] & [[SiteSubtitle]]: The title and subtitle of the site, as shown above (after saving, they will also appear in the browser title bar)
* [[MainMenu]]: The menu (usually on the left)
* [[DefaultTiddlers]]: Contains the names of the tiddlers that you want to appear when the TiddlyWiki is opened
You'll also need to enter your username for signing your edits: <<option txtUserName>>
<<importTiddlers>>
<!--{{{-->
<link rel='alternate' type='application/rss+xml' title='RSS' href='index.xml' />
<!--}}}-->
These [[InterfaceOptions]] for customising [[TiddlyWiki]] are saved in your browser

Your username for signing your edits. Write it as a [[WikiWord]] (eg [[JoeBloggs]])

<<option txtUserName>>
<<option chkSaveBackups>> [[SaveBackups]]
<<option chkAutoSave>> [[AutoSave]]
<<option chkRegExpSearch>> [[RegExpSearch]]
<<option chkCaseSensitiveSearch>> [[CaseSensitiveSearch]]
<<option chkAnimate>> [[EnableAnimations]]

----
Also see [[AdvancedOptions]]
<!--{{{-->
<!-- PageTemplate: overall page chrome for the wiki -->
<!-- Header: the title/subtitle are rendered twice (shadow + foreground)
     to produce a drop-shadow effect -->
<div class='header' role='banner' macro='gradient vert [[ColorPalette::PrimaryLight]] [[ColorPalette::PrimaryMid]]'>
<div class='headerShadow'>
<span class='siteTitle' refresh='content' tiddler='SiteTitle'></span>&nbsp;
<span class='siteSubtitle' refresh='content' tiddler='SiteSubtitle'></span>
</div>
<div class='headerForeground'>
<span class='siteTitle' refresh='content' tiddler='SiteTitle'></span>&nbsp;
<span class='siteSubtitle' refresh='content' tiddler='SiteSubtitle'></span>
</div>
</div>
<!-- Left navigation menu (content comes from the MainMenu tiddler) -->
<div id='mainMenu' role='navigation' refresh='content' tiddler='MainMenu'></div>
<!-- Right sidebar: option switches plus the tabbed lists -->
<div id='sidebar'>
<div id='sidebarOptions' role='navigation' refresh='content' tiddler='SideBarOptions'></div>
<div id='sidebarTabs' role='complementary' refresh='content' force='true' tiddler='SideBarTabs'></div>
</div>
<!-- Main column where open tiddlers and system messages are displayed -->
<div id='displayArea' role='main'>
<div id='messageArea'></div>
<div id='tiddlerDisplay'></div>
</div>
<!--}}}-->
/*{{{*/
/* StyleSheetColors — every color below is substituted from the
   ColorPalette tiddler via [[ColorPalette::Name]], so the theme is
   changed by editing ColorPalette rather than this sheet. */

/* Page defaults and hyperlinks */
body {background:[[ColorPalette::Background]]; color:[[ColorPalette::Foreground]];}

a {color:[[ColorPalette::PrimaryMid]];}
a:hover {background-color:[[ColorPalette::PrimaryMid]]; color:[[ColorPalette::Background]];}
a img {border:0;}

/* Headings */
h1,h2,h3,h4,h5,h6 {color:[[ColorPalette::SecondaryDark]]; background:transparent;}
h1 {border-bottom:2px solid [[ColorPalette::TertiaryLight]];}
h2,h3 {border-bottom:1px solid [[ColorPalette::TertiaryLight]];}

/* Generic command buttons */
.button {color:[[ColorPalette::PrimaryDark]]; border:1px solid [[ColorPalette::Background]];}
.button:hover {color:[[ColorPalette::PrimaryDark]]; background:[[ColorPalette::SecondaryLight]]; border-color:[[ColorPalette::SecondaryMid]];}
.button:active {color:[[ColorPalette::Background]]; background:[[ColorPalette::SecondaryMid]]; border:1px solid [[ColorPalette::SecondaryDark]];}

/* Page header (shadow + foreground copies of the site title) */
.header {background:[[ColorPalette::PrimaryMid]];}
.headerShadow {color:[[ColorPalette::Foreground]];}
.headerShadow a {font-weight:normal; color:[[ColorPalette::Foreground]];}
.headerForeground {color:[[ColorPalette::Background]];}
.headerForeground a {font-weight:normal; color:[[ColorPalette::PrimaryPale]];}

/* Tab sets */
.tabSelected {color:[[ColorPalette::PrimaryDark]];
	background:[[ColorPalette::TertiaryPale]];
	border-left:1px solid [[ColorPalette::TertiaryLight]];
	border-top:1px solid [[ColorPalette::TertiaryLight]];
	border-right:1px solid [[ColorPalette::TertiaryLight]];
}
.tabUnselected {color:[[ColorPalette::Background]]; background:[[ColorPalette::TertiaryMid]];}
.tabContents {color:[[ColorPalette::PrimaryDark]]; background:[[ColorPalette::TertiaryPale]]; border:1px solid [[ColorPalette::TertiaryLight]];}
.tabContents .button {border:0;}

/* Sidebar */
#sidebar {}
#sidebarOptions input {border:1px solid [[ColorPalette::PrimaryMid]];}
#sidebarOptions .sliderPanel {background:[[ColorPalette::PrimaryPale]];}
#sidebarOptions .sliderPanel a {border:none;color:[[ColorPalette::PrimaryMid]];}
#sidebarOptions .sliderPanel a:hover {color:[[ColorPalette::Background]]; background:[[ColorPalette::PrimaryMid]];}
#sidebarOptions .sliderPanel a:active {color:[[ColorPalette::PrimaryMid]]; background:[[ColorPalette::Background]];}

/* Wizard dialogs (import/sync UI) */
.wizard {background:[[ColorPalette::PrimaryPale]]; border:1px solid [[ColorPalette::PrimaryMid]];}
.wizard h1 {color:[[ColorPalette::PrimaryDark]]; border:none;}
.wizard h2 {color:[[ColorPalette::Foreground]]; border:none;}
.wizardStep {background:[[ColorPalette::Background]]; color:[[ColorPalette::Foreground]];
	border:1px solid [[ColorPalette::PrimaryMid]];}
.wizardStep.wizardStepDone {background:[[ColorPalette::TertiaryLight]];}
.wizardFooter {background:[[ColorPalette::PrimaryPale]];}
.wizardFooter .status {background:[[ColorPalette::PrimaryDark]]; color:[[ColorPalette::Background]];}
.wizard .button {color:[[ColorPalette::Foreground]]; background:[[ColorPalette::SecondaryLight]]; border: 1px solid;
	border-color:[[ColorPalette::SecondaryPale]] [[ColorPalette::SecondaryDark]] [[ColorPalette::SecondaryDark]] [[ColorPalette::SecondaryPale]];}
.wizard .button:hover {color:[[ColorPalette::Foreground]]; background:[[ColorPalette::Background]];}
.wizard .button:active {color:[[ColorPalette::Background]]; background:[[ColorPalette::Foreground]]; border: 1px solid;
	border-color:[[ColorPalette::PrimaryDark]] [[ColorPalette::PrimaryPale]] [[ColorPalette::PrimaryPale]] [[ColorPalette::PrimaryDark]];}

/* Sync-wizard status backgrounds (hard-coded; intentionally not themed
   through ColorPalette) */
.wizard .notChanged {background:transparent;}
.wizard .changedLocally {background:#80ff80;}
.wizard .changedServer {background:#8080ff;}
.wizard .changedBoth {background:#ff8080;}
.wizard .notFound {background:#ffff80;}
.wizard .putToServer {background:#ff80ff;}
.wizard .gotFromServer {background:#80ffff;}

/* Transient system-message area */
#messageArea {border:1px solid [[ColorPalette::SecondaryMid]]; background:[[ColorPalette::SecondaryLight]]; color:[[ColorPalette::Foreground]];}
#messageArea .button {color:[[ColorPalette::PrimaryMid]]; background:[[ColorPalette::SecondaryPale]]; border:none;}

/* Popup menus and popup tiddler previews */
.popupTiddler {background:[[ColorPalette::TertiaryPale]]; border:2px solid [[ColorPalette::TertiaryMid]];}

.popup {background:[[ColorPalette::TertiaryPale]]; color:[[ColorPalette::TertiaryDark]]; border-left:1px solid [[ColorPalette::TertiaryMid]]; border-top:1px solid [[ColorPalette::TertiaryMid]]; border-right:2px solid [[ColorPalette::TertiaryDark]]; border-bottom:2px solid [[ColorPalette::TertiaryDark]];}
.popup hr {color:[[ColorPalette::PrimaryDark]]; background:[[ColorPalette::PrimaryDark]]; border-bottom:1px;}
.popup li.disabled {color:[[ColorPalette::TertiaryMid]];}
.popup li a, .popup li a:visited {color:[[ColorPalette::Foreground]]; border: none;}
.popup li a:hover {background:[[ColorPalette::SecondaryLight]]; color:[[ColorPalette::Foreground]]; border: none;}
.popup li a:active {background:[[ColorPalette::SecondaryPale]]; color:[[ColorPalette::Foreground]]; border: none;}
.popupHighlight {background:[[ColorPalette::Background]]; color:[[ColorPalette::Foreground]];}
.listBreak div {border-bottom:1px solid [[ColorPalette::TertiaryDark]];}

/* Per-tiddler chrome: titles, toolbar, tag panels, footer */
.tiddler .defaultCommand {font-weight:bold;}

.shadow .title {color:[[ColorPalette::TertiaryDark]];}

.title {color:[[ColorPalette::SecondaryDark]];}
.subtitle {color:[[ColorPalette::TertiaryDark]];}

.toolbar {color:[[ColorPalette::PrimaryMid]];}
.toolbar a {color:[[ColorPalette::TertiaryLight]];}
.selected .toolbar a {color:[[ColorPalette::TertiaryMid]];}
.selected .toolbar a:hover {color:[[ColorPalette::Foreground]];}

.tagging, .tagged {border:1px solid [[ColorPalette::TertiaryPale]]; background-color:[[ColorPalette::TertiaryPale]];}
.selected .tagging, .selected .tagged {background-color:[[ColorPalette::TertiaryLight]]; border:1px solid [[ColorPalette::TertiaryMid]];}
.tagging .listTitle, .tagged .listTitle {color:[[ColorPalette::PrimaryDark]];}
.tagging .button, .tagged .button {border:none;}

.footer {color:[[ColorPalette::TertiaryLight]];}
.selected .footer {color:[[ColorPalette::TertiaryMid]];}

/* Status / emphasis backgrounds */
.error, .errorButton {color:[[ColorPalette::Foreground]]; background:[[ColorPalette::Error]];}
.warning {color:[[ColorPalette::Foreground]]; background:[[ColorPalette::SecondaryPale]];}
.lowlight {background:[[ColorPalette::TertiaryLight]];}

.zoomer {background:none; color:[[ColorPalette::TertiaryMid]]; border:3px solid [[ColorPalette::TertiaryMid]];}

.imageLink, #displayArea .imageLink {background:transparent;}

.annotation {background:[[ColorPalette::SecondaryLight]]; color:[[ColorPalette::Foreground]]; border:2px solid [[ColorPalette::SecondaryMid]];}

/* Rendered tiddler body ("viewer") */
.viewer .listTitle {list-style-type:none; margin-left:-2em;}
.viewer .button {border:1px solid [[ColorPalette::SecondaryMid]];}
.viewer blockquote {border-left:3px solid [[ColorPalette::TertiaryDark]];}

.viewer table, table.twtable {border:2px solid [[ColorPalette::TertiaryDark]];}
.viewer th, .viewer thead td, .twtable th, .twtable thead td {background:[[ColorPalette::SecondaryMid]]; border:1px solid [[ColorPalette::TertiaryDark]]; color:[[ColorPalette::Background]];}
.viewer td, .viewer tr, .twtable td, .twtable tr {border:1px solid [[ColorPalette::TertiaryDark]];}

.viewer pre {border:1px solid [[ColorPalette::SecondaryLight]]; background:[[ColorPalette::SecondaryPale]];}
.viewer code {color:[[ColorPalette::SecondaryDark]];}
.viewer hr {border:0; border-top:dashed 1px [[ColorPalette::TertiaryDark]]; color:[[ColorPalette::TertiaryDark]];}

.highlight, .marked {background:[[ColorPalette::SecondaryLight]];}

/* Edit-mode fields */
.editor input {border:1px solid [[ColorPalette::PrimaryMid]];}
.editor textarea {border:1px solid [[ColorPalette::PrimaryMid]]; width:100%;}
.editorFooter {color:[[ColorPalette::TertiaryMid]];}
.readOnly {background:[[ColorPalette::TertiaryPale]];}

/* Backstage (authoring) area */
#backstageArea {background:[[ColorPalette::Foreground]]; color:[[ColorPalette::TertiaryMid]];}
#backstageArea a {background:[[ColorPalette::Foreground]]; color:[[ColorPalette::Background]]; border:none;}
#backstageArea a:hover {background:[[ColorPalette::SecondaryLight]]; color:[[ColorPalette::Foreground]]; }
#backstageArea a.backstageSelTab {background:[[ColorPalette::Background]]; color:[[ColorPalette::Foreground]];}
#backstageButton a {background:none; color:[[ColorPalette::Background]]; border:none;}
#backstageButton a:hover {background:[[ColorPalette::Foreground]]; color:[[ColorPalette::Background]]; border:none;}
#backstagePanel {background:[[ColorPalette::Background]]; border-color: [[ColorPalette::Background]] [[ColorPalette::TertiaryDark]] [[ColorPalette::TertiaryDark]] [[ColorPalette::TertiaryDark]];}
.backstagePanelFooter .button {border:none; color:[[ColorPalette::Background]];}
.backstagePanelFooter .button:hover {color:[[ColorPalette::Foreground]];}
#backstageCloak {background:[[ColorPalette::Foreground]]; opacity:0.6; filter:alpha(opacity=60);}
/*}}}*/
/*{{{*/
/* StyleSheetLayout — sizing, spacing and positioning only; colors live
   in the companion color stylesheet. Selectors beginning with "* html"
   and properties prefixed with "_" are IE6-only hacks. */
* html .tiddler {height:1%;}

body {font-size:.75em; font-family:arial,helvetica; margin:0; padding:0;}

/* Heading scale */
h1,h2,h3,h4,h5,h6 {font-weight:bold; text-decoration:none;}
h1,h2,h3 {padding-bottom:1px; margin-top:1.2em;margin-bottom:0.3em;}
h4,h5,h6 {margin-top:1em;}
h1 {font-size:1.35em;}
h2 {font-size:1.25em;}
h3 {font-size:1.1em;}
h4 {font-size:1em;}
h5 {font-size:.9em;}

hr {height:1px;}

a {text-decoration:none;}

dt {font-weight:bold;}

/* Nested ordered lists cycle decimal -> alpha -> roman */
ol {list-style-type:decimal;}
ol ol {list-style-type:lower-alpha;}
ol ol ol {list-style-type:lower-roman;}
ol ol ol ol {list-style-type:decimal;}
ol ol ol ol ol {list-style-type:lower-alpha;}
ol ol ol ol ol ol {list-style-type:lower-roman;}
ol ol ol ol ol ol ol {list-style-type:decimal;}

.txtOptionInput {width:11em;}

#contentWrapper .chkOptionInput {border:0;}

.externalLink {text-decoration:underline;}

.indent {margin-left:3em;}
.outdent {margin-left:3em; text-indent:-3em;}
code.escaped {white-space:nowrap;}

/* Link styling distinguishes existing vs missing tiddlers */
.tiddlyLinkExisting {font-weight:bold;}
.tiddlyLinkNonExisting {font-style:italic;}

/* the 'a' is required for IE, otherwise it renders the whole tiddler in bold */
a.tiddlyLinkNonExisting.shadow {font-weight:bold;}

#mainMenu .tiddlyLinkExisting,
	#mainMenu .tiddlyLinkNonExisting,
	#sidebarTabs .tiddlyLinkNonExisting {font-weight:normal; font-style:normal;}
#sidebarTabs .tiddlyLinkExisting {font-weight:bold; font-style:normal;}

/* Header: the shadow copy is offset 1px up-left behind the foreground copy */
.header {position:relative;}
.header a:hover {background:transparent;}
.headerShadow {position:relative; padding:4.5em 0 1em 1em; left:-1px; top:-1px;}
.headerForeground {position:absolute; padding:4.5em 0 1em 1em; left:0; top:0;}

.siteTitle {font-size:3em;}
.siteSubtitle {font-size:1.2em;}

/* Three-column layout: absolute menu left, absolute sidebar right,
   fluid display area in the middle (see #displayArea margins below) */
#mainMenu {position:absolute; left:0; width:10em; text-align:right; line-height:1.6em; padding:1.5em 0.5em 0.5em 0.5em; font-size:1.1em;}

#sidebar {position:absolute; right:3px; width:16em; font-size:.9em;}
#sidebarOptions {padding-top:0.3em;}
#sidebarOptions a {margin:0 0.2em; padding:0.2em 0.3em; display:block;}
#sidebarOptions input {margin:0.4em 0.5em;}
#sidebarOptions .sliderPanel {margin-left:1em; padding:0.5em; font-size:.85em;}
#sidebarOptions .sliderPanel a {font-weight:bold; display:inline; padding:0;}
#sidebarOptions .sliderPanel input {margin:0 0 0.3em 0;}
#sidebarTabs .tabContents {width:15em; overflow:hidden;}

/* Wizard dialogs */
.wizard {padding:0.1em 1em 0 2em;}
.wizard h1 {font-size:2em; font-weight:bold; background:none; padding:0; margin:0.4em 0 0.2em;}
.wizard h2 {font-size:1.2em; font-weight:bold; background:none; padding:0; margin:0.4em 0 0.2em;}
.wizardStep {padding:1em 1em 1em 1em;}
.wizard .button {margin:0.5em 0 0; font-size:1.2em;}
.wizardFooter {padding:0.8em 0.4em 0.8em 0;}
.wizardFooter .status {padding:0 0.4em; margin-left:1em;}
.wizard .button {padding:0.1em 0.2em;}

/* Message area pinned top-right; _position is the IE6 fallback */
#messageArea {position:fixed; top:2em; right:0; margin:0.5em; padding:0.5em; z-index:2000; _position:absolute;}
.messageToolbar {display:block; text-align:right; padding:0.2em;}
#messageArea a {text-decoration:underline;}

/* Popup menus and popup tiddlers */
.tiddlerPopupButton {padding:0.2em;}
.popupTiddler {position: absolute; z-index:300; padding:1em; margin:0;}

.popup {position:absolute; z-index:300; font-size:.9em; padding:0; list-style:none; margin:0;}
.popup .popupMessage {padding:0.4em;}
.popup hr {display:block; height:1px; width:auto; padding:0; margin:0.2em 0;}
.popup li.disabled {padding:0.4em;}
.popup li a {display:block; padding:0.4em; font-weight:normal; cursor:pointer;}
.listBreak {font-size:1px; line-height:1px;}
.listBreak div {margin:2px 0;}

/* Tab sets */
.tabset {padding:1em 0 0 0.5em;}
.tab {margin:0 0 0 0.25em; padding:2px;}
.tabContents {padding:0.5em;}
.tabContents ul, .tabContents ol {margin:0; padding:0;}
.txtMainTab .tabContents li {list-style:none;}
.tabContents li.listLink { margin-left:.75em;}

#contentWrapper {display:block;}
#splashScreen {display:none;}

/* Middle column margins clear the absolutely-positioned menu/sidebar */
#displayArea {margin:1em 17em 0 14em;}

.toolbar {text-align:right; font-size:.9em;}

.tiddler {padding:1em 1em 0;}

.missing .viewer,.missing .title {font-style:italic;}

.title {font-size:1.6em; font-weight:bold;}

.missing .subtitle {display:none;}
.subtitle {font-size:1.1em;}

.tiddler .button {padding:0.2em 0.4em;}

/* Tag panels: "tagging" floats left (tag tiddlers only), "tagged" floats right */
.tagging {margin:0.5em 0.5em 0.5em 0; float:left; display:none;}
.isTag .tagging {display:block;}
.tagged {margin:0.5em; float:right;}
.tagging, .tagged {font-size:0.9em; padding:0.25em;}
.tagging ul, .tagged ul {list-style:none; margin:0.25em; padding:0;}
.tagClear {clear:both;}

.footer {font-size:.9em;}
.footer li {display:inline;}

.annotation {padding:0.5em; margin:0.5em;}

/* Viewer (rendered tiddler body) */
* html .viewer pre {width:99%; padding:0 0 1em 0;}
.viewer {line-height:1.4em; padding-top:0.5em;}
.viewer .button {margin:0 0.25em; padding:0 0.25em;}
.viewer blockquote {line-height:1.5em; padding-left:0.8em;margin-left:2.5em;}
.viewer ul, .viewer ol {margin-left:0.5em; padding-left:1.5em;}

.viewer table, table.twtable {border-collapse:collapse; margin:0.8em 1.0em;}
.viewer th, .viewer td, .viewer tr,.viewer caption,.twtable th, .twtable td, .twtable tr,.twtable caption {padding:3px;}
table.listView {font-size:0.85em; margin:0.8em 1.0em;}
table.listView th, table.listView td, table.listView tr {padding:0 3px 0 3px;}

.viewer pre {padding:0.5em; margin-left:0.5em; font-size:1.2em; line-height:1.4em; overflow:auto;}
.viewer code {font-size:1.2em; line-height:1.4em;}

/* Editor fields */
.editor {font-size:1.1em;}
.editor input, .editor textarea {display:block; width:100%; font:inherit;}
.editorFooter {padding:0.25em 0; font-size:.9em;}
.editorFooter .button {padding-top:0; padding-bottom:0;}

.fieldsetFix {border:0; padding:0; margin:1px 0px;}

.zoomer {font-size:1.1em; position:absolute; overflow:hidden;}
.zoomer div {padding:1em;}

/* Backstage (authoring) area */
* html #backstage {width:99%;}
* html #backstageArea {width:99%;}
#backstageArea {display:none; position:relative; overflow: hidden; z-index:150; padding:0.3em 0.5em;}
#backstageToolbar {position:relative;}
#backstageArea a {font-weight:bold; margin-left:0.5em; padding:0.3em 0.5em;}
#backstageButton {display:none; position:absolute; z-index:175; top:0; right:0;}
#backstageButton a {padding:0.1em 0.4em; margin:0.1em;}
#backstage {position:relative; width:100%; z-index:50;}
#backstagePanel {display:none; z-index:100; position:absolute; width:90%; margin-left:3em; padding:1em;}
.backstagePanelFooter {padding-top:0.2em; float:right;}
.backstagePanelFooter a {padding:0.2em 0.4em;}
#backstageCloak {display:none; z-index:20; position:absolute; width:100%; height:100px;}

.whenBackstage {display:none;}
.backstageVisible .whenBackstage {display:block;}
/*}}}*/
/***
StyleSheet for use when a translation requires any css style changes.
This StyleSheet can be used directly by languages such as Chinese, Japanese and Korean which need larger font sizes.
***/
/*{{{*/
/* StyleSheetLocale: font-size adjustments applied on top of the main
   layout stylesheet for translations (e.g. CJK languages) that need
   larger glyphs to stay legible. */
body {font-size:0.8em;}
#sidebarOptions {font-size:1.05em;}
#sidebarOptions a {font-style:normal;}
#sidebarOptions .sliderPanel {font-size:0.95em;}
.subtitle {font-size:0.8em;}
.viewer table.listView {font-size:0.95em;}
/*}}}*/
/*{{{*/
/* StyleSheetPrint: hide navigation chrome and widen the display area
   when the page is printed. */
@media print {
#mainMenu, #sidebar, #messageArea, .toolbar, #backstageButton, #backstageArea {display: none !important;}
#displayArea {margin: 1em 1em 0em;}
noscript {display:none;} /* Fixes a feature in Firefox 1.5.0.2 where print preview displays the noscript content */
}
/*}}}*/
<!--{{{-->
<!-- ViewTemplate: layout used when a tiddler is displayed read-only -->
<div class='toolbar' role='navigation' macro='toolbar [[ToolbarCommands::ViewToolbar]]'></div>
<div class='title' macro='view title'></div>
<!-- byline: last modifier, modified date, and creation date -->
<div class='subtitle'><span macro='view modifier link'></span>, <span macro='view modified date'></span> (<span macro='message views.wikified.createdPrompt'></span> <span macro='view created date'></span>)</div>
<div class='tagging' macro='tagging'></div>
<div class='tagged' macro='tags'></div>
<div class='viewer' macro='view text wikified'></div>
<!-- clears the floated tagging/tagged panels -->
<div class='tagClear'></div>
<!--}}}-->
<html>
<!-- Embedded video.js player for the PuTTY walkthrough video -->
<center>
  <video id="my-video" class="video-js" controls preload="auto" width="700" height="460" poster="" data-setup="{}">
    <source src="video/putty.mp4" type='video/mp4'>
    <p class="vjs-no-js">
      To view this video please enable JavaScript, and consider upgrading to a web browser that
      <a href="http://videojs.com/html5-video-support/" target="_blank">supports HTML5 video</a>
    </p>
  </video>

  <!-- NOTE(review): video.js normally also requires its stylesheet
       (video-js.css from the same CDN version) for the control bar to
       render correctly — confirm it is loaded elsewhere on this page -->
  <script src="https://vjs.zencdn.net/7.8.2/video.min.js"></script>
</center>
</html>

{{Warning{Note:  The connection port shown in the video must be changed from {{Monospaced{22}}} to {{Monospaced{2205}}}. }}}
<html>
<!-- Embedded Google Calendar showing course due dates -->
<table border="0">
<tr border="0"><td align="center" border="0">
<iframe src="https://www.google.com/calendar/embed?src=cl5tpjhd9f3bj9c52idttghtsg%40group.calendar.google.com&ctz=America/New_York" style="border: 0" width="1000" height="600" frameborder="0" scrolling="no"></iframe>
<br><b>This calendar is authoritative should there be due date conflicts with other pages on the site</b>
</td></tr>
</table>
</html>

A traditional class meeting on campus naturally allows for regular communication.  This is beneficial by helping students better understand the material along with allowing the instructor to more easily gauge how everyone is doing in the class.

I would like to ensure hosting this course online does not deprive us of regular communication.  Class participation will be worth 10% of your overall grade.  Each week's class participation will be worth ''50 points'' total. Multiple posts each week will be necessary to receive full credit.

Posts for each week must be made by Sunday, 11:55 p.m. (EST) the following week in order to receive full credit.  This allows one week to post questions about outstanding assignments and one week to post questions about labs after they have been returned.  Organization is important - __please post lab questions on the discussion board for the week they were assigned__.

Joining group Zoom meetings will also earn class participation credit.  


!! Participation:

You may ask questions, work collaboratively on assignments, and provide assistance to one another in Discord. You can also provide ideas or helpful resources that assisted you on your assignments.  Credit may also be received for joining or participating in either regularly scheduled or ad-hoc group Zoom meetings.

!! Rubric for weekly class participation:

* 25 points - Actively participate in a group Zoom meeting
* 10-20 points - High quality posts which contain well-developed questions, actionable suggestions, or recommendations
* 15 points - Attend a group Zoom meeting
* 5-10 points - General comments regarding the assignments.  No specific insights directly related to the problem or responses to questions which are not actionable.


!! Quality of Remarks:

You will be evaluated based on the quality of your participation by asking questions, providing constructive assistance, making recommendations related to our material, and making pertinent comments.

The discussion forum and Zoom meetings are a valuable component of learning since they allow you to see a variety of solutions and ideas just like you would in a classroom.

Generally, please do not post direct solutions to lab questions, especially unsolicited, before their due date.  Doing so will not be awarded participation points.  If someone is genuinely stuck on a problem and you'd like to help, guidance towards the solution is always a more beneficial place to start rather than just posting the answer.  If you just post the answer, I cannot tell if someone understands the problem or simply copied your solution.

Please be sure to check out the [[Using Discord]] page to see more useful information.


!! Adding New Threads

Good organization is important.  Create new threads in the weekly Discord channels in which the material was assigned.  When naming your threads, use something descriptive in the name and not just the lab and question number.  The highlighted thread is a model to follow and will make things easier to find as the number of posts grows.  Be sure to scan for an existing thread relating to your topic before creating a new one.  Usability is an important consideration in what you do.  ''Not using descriptive thread titles is detrimental to usability, so that post will not receive full credit.''

[img[img/discussionBoards.png]]
(yes, this screenshot is from Blackboard, but the point still stands.  The [[Using Discord]] page has more detail about Discord threads.)


Our class utilizes a [[DigitalOcean|https://www.digitalocean.com/products/droplets/]] droplet for the first half and a [[Hetzner bare-metal auction|https://www.hetzner.com/sb?ram_from=8&ram_to=10&ssd=true&cpu_from=8000&cpu_to=30000&price_from=50&price_to=90&search=Xeon+E5]] server to support the student lab environment during the second half of the semester.

!! ~DigitalOcean droplet

Our needs are very minimal for the first half of the semester; we only require a Linux shell server everyone can access to practice the commands and submit their work.  ~CentOS is used as our Linux distribution.  

A $5 per month ~DigitalOcean droplet (virtual machine) is more than enough.  ~DigitalOcean droplets are great for small projects like this.  We'll use it for two months then take a snapshot and destroy the droplet to save money.  It'll then be brought back from the snapshot when it's needed again for the next semester.


!! Hetzner Bare-metal

About halfway through the semester we'll switch from being Unix users to administrators.  Each student will be given a small collection of virtual machines to install and configure.  Additional resources are required for this portion of the class since each student will require about 6 ~VMs. Instead of just a single VM to cover the entire class, we'll now need a full server.  Hetzner auction servers have been a reliable, low-cost option for such short-term needs.  

To provide enough resources for the entire class, I'll be looking for a server with the following minimum specs:
* 64gb RAM
* CPU with at least 6 cores at 3.4GHz.  I'm currently using the ~E5-1650V2.
* 2x 256gb SSD (SSD drives are important for disk speeds.  SATA are too slow)

A server with these specs comes to about €60.00 per month.  We'll need it for 2 months.  The total infrastructure cost for this class per-semester is then about $150.

The server is initially provisioned by Hetzner with Debian Linux and [[Proxmox|https://www.proxmox.com/en/]] is then installed to act as our hypervisor.  Proxmox runs on Debian and can either be installed from [[its own CD image|https://www.proxmox.com/en/downloads]] or the [[packages can be installed on an existing Debian system|https://pve.proxmox.com/wiki/Install_Proxmox_VE_on_Debian_Buster]].  We'll use the latter option here since Hetzner must install the original OS for us and they do not have an option for Proxmox.

After the Hetzner server and Proxmox are installed, ~VMs will be created for our class shell server and for internal monitoring.  The class shell server will then be migrated from ~DigitalOcean.  Student ~VMs will be created from templates.

At the end of the semester everyone will be given the option to download their ~VMs for use locally with ~VirtualBox.  Student ~VMs will then be deleted, administrative ~VMs will be backed up to ~BackBlaze B2 storage for next time, then the server contract will be ended.

Other tools/services used:
* [[Fossil source code manager|https://fossil-scm.org/home/doc/trunk/www/index.wiki]] - Used to handle revision control for server configuration files and scripts
* [[SaltStack Infrastructure management|https://docs.saltproject.io/en/latest/]] - Used to orchestrate VM templates and manage infrastructure monitoring
* [[Naemon monitoring suite|https://www.naemon.org/]] - Used to monitor student ~VMs and provide feedback on completed/outstanding tasks
* [[Docker containerization|https://www.docker.com/]] - Used to rapidly deploy and isolate different services on the same VM in a way that can be easily repeated.
* [[BackBlaze B2 cloud storage|https://www.backblaze.com/b2/cloud-storage.html]] - Used to store management ~VMs and VM templates between semesters.  Storage here costs $0.005 per Gb.

The combination of these tools allows the Hetzner server to be quickly brought online when needed for the new semester, its ~VMs and templates downloaded from ~BackBlaze B2 storage, and made ready to support our class.


Useful concepts:
* [[Infrastructure as code|https://en.wikipedia.org/wiki/Infrastructure_as_code]] - Rapidly provision servers, ~VMs, and Docker containers for individual services using ~APIs & orchestration tools with pre-made definition files instead of manually.  Using this concept, our class lab server is brought up from bare metal to fully online and ready to support users in about 30 minutes with just a handful of commands.

/***
!! CollapseTiddlersPlugin
^^Author: Bradley Meck^^
^^Source: http://gensoft.revhost.net/Collapse.html^^

|ELS 2/24/2006: added fallback to "CollapsedTemplate" if "WebCollapsedTemplate" is not found |
|ELS 2/6/2006: added check for 'readOnly' flag to use alternative "WebCollapsedTemplate" |

***/

config.commands.collapseTiddler = {
	text: "fold",
	tooltip: "Collapse this tiddler",
	// Swap the tiddler's current template for the collapsed one, remembering
	// the old template so the "unfold" command can restore it later.
	handler: function(event,src,title)
	{
		var elem = story.findContainingTiddler(src);
		// Never fold a tiddler that is open in the edit template.
		if(elem.getAttribute("template") == config.tiddlerTemplates[DEFAULT_EDIT_TEMPLATE])
			return;
		// Read-only wikis prefer "WebCollapsedTemplate" when it exists.
		var target = (readOnly && store.tiddlerExists("WebCollapsedTemplate")) ? "WebCollapsedTemplate" : "CollapsedTemplate";
		if(!store.tiddlerExists(target)) {
			alert("Can't find 'CollapsedTemplate'");
			return;
		}
		if(elem.getAttribute("template") != target) {
			elem.setAttribute("oldTemplate",elem.getAttribute("template"));
			story.displayTiddler(null,title,target);
		}
	}
}

config.commands.expandTiddler = {
	text: "unfold",
	tooltip: "Expand this tiddler",
	// Redisplay the tiddler using the template that was saved in the
	// "oldTemplate" attribute when it was folded.
	handler: function(event,src,title)
	{
		var elem = story.findContainingTiddler(src);
		var previousTemplate = elem.getAttribute("oldTemplate");
		story.displayTiddler(null,title,previousTemplate);
	}
}

config.macros.collapseAll = {
	// Macro: renders a "collapse all" button that folds every displayed
	// tiddler, skipping any that are currently open for editing.
	handler: function(place,macroName,params,wikifier,paramString,tiddler){
		createTiddlyButton(place,"collapse all","",function(){
			story.forEachTiddler(function(title,tiddler){
				// BUG FIX: the original 'if' had no braces, so only the hoisted
				// 'var t' declaration was guarded. For a tiddler open in the edit
				// template, t stayed undefined and the existence check below fired
				// a spurious "Can't find 'CollapsedTemplate'" alert. Braces make
				// the whole body conditional, matching collapseOthers.
				if(tiddler.getAttribute("template") != config.tiddlerTemplates[DEFAULT_EDIT_TEMPLATE]) {
					var t = (readOnly&&store.tiddlerExists("WebCollapsedTemplate"))?"WebCollapsedTemplate":"CollapsedTemplate";
					if (!store.tiddlerExists(t)) { alert("Can't find 'CollapsedTemplate'"); return; }
					// Save the pre-collapse template (as collapseTiddler does) so
					// that expandAll can restore it afterwards.
					if(tiddler.getAttribute("template") != t)
						tiddler.setAttribute("oldTemplate",tiddler.getAttribute("template"));
					story.displayTiddler(null,title,t);
				}
			})
		})
	}
}

config.macros.expandAll = {
	// Macro: renders an "expand all" button that restores every folded
	// tiddler to the template recorded in its "oldTemplate" attribute.
	handler: function(place,macroName,params,wikifier,paramString,tiddler){
		createTiddlyButton(place,"expand all","",function(){
			story.forEachTiddler(function(title,tiddler){
				var collapsed = (readOnly&&store.tiddlerExists("WebCollapsedTemplate"))?"WebCollapsedTemplate":"CollapsedTemplate";
				if (!store.tiddlerExists(collapsed)) { alert("Can't find 'CollapsedTemplate'"); return; }
				// Only unfold tiddlers currently shown with the collapsed template.
				if(tiddler.getAttribute("template") == collapsed)
					story.displayTiddler(null,title,tiddler.getAttribute("oldTemplate"));
			})
		})
	}
}

config.commands.collapseOthers = {
	text: "focus",
	tooltip: "Expand this tiddler and collapse all others",
	// The clicked tiddler keeps (or regains) its saved template; every other
	// open tiddler is switched to the collapsed template.
	handler: function(event,src,title)
	{
		var focused = story.findContainingTiddler(src);
		story.forEachTiddler(function(title,tiddler){
			// Leave tiddlers that are open in the edit template alone.
			if(tiddler.getAttribute("template") == config.tiddlerTemplates[DEFAULT_EDIT_TEMPLATE])
				return;
			var target = (readOnly && store.tiddlerExists("WebCollapsedTemplate")) ? "WebCollapsedTemplate" : "CollapsedTemplate";
			if (!store.tiddlerExists(target)) { alert("Can't find 'CollapsedTemplate'"); return; }
			// For the focused tiddler use its saved template instead. If that is
			// null, displayTiddler keeps the tiddler's *current* view template
			// (per the ELS 2006.02.22 note in the original plugin).
			if (focused==tiddler)
				target = focused.getAttribute("oldTemplate");
			story.displayTiddler(null,title,target);
		})
	}
}
<div><div class='toolbar' macro='toolbar -closeTiddler closeOthers +editTiddler  permalink references jump newHere expandTiddler collapseOthers'></div>
<div class='title' macro='view title'></div></div>
! Material

!! Watch:
* Difference between virtual machines and containers: https://www.youtube.com/watch?v=cjXI-yxqGTI
* Brief Docker intro: https://www.youtube.com/watch?v=_dfLOzuIg2o


! Notes

As servers become more powerful, it is increasingly useful to switch from a standard server installation to some form of virtualization.  Virtualization allows us to run several separate instances of an operating system, or different operating systems, on the same physical server.  Consolidating what would have been separate physical servers into one allows us to save on hardware, electrical and networking resources, and physical space.  A reduced hardware footprint is also easier to maintain.  This consolidation has allowed for a substantial decrease of the physical footprint occupied by modern datacenters.  What previously occupied an entire room can be reduced to a single rack.

Virtualization is supporting this class.  All of your virtual servers for our material are running on a single large server in a cloud datacenter, along with many other ~VMs for other purposes.  When I first took a course similar to this one many years ago, every student was assigned several physical systems to complete our work.  This hardware consumed an entire lab and the overhead to maintain the hardware consumed a great deal of our time.  

Containers are an alternative to this type of full virtualization, or can be used in addition to it.  Containers, as the name implies, contain a set of resources and isolate them from the rest of the system.  Instead of a service, like apache, having full access to all resources on the operating system it's running on, running Apache within a container will limit its scope to only the resources we decide it should have.

!! Install Docker

Docker is one popular container system, where images are published and can be pulled down for use.  The default version of Docker available with ~CentOS is very old.  Instead, we're going to add an additional yum repository and install the most recent version.

Execute the following commands to install Docker

# {{Command{ yum install -y yum-utils git }}}
# {{Command{ yum-config-manager &#45;-add-repo https://download.docker.com/linux/centos/docker-ce.repo }}}
# {{Command{ yum install docker-ce docker-compose }}}

!! Start the Docker service

# Configure your system to start the docker service on boot
# Start the docker service now

!! Docker examples
After reading the chapter and reviewing the videos posted above, open the https://docker-curriculum.com/ site and work through its examples.
* Perform these steps on your test VM
* create a scratch space within {{File{ /opt/ }}} when you get to the {{Command{ git clone }}} command.  This will download files to your VM.
* Stop when you reach //Docker on AWS//


! Assignment
<<tiddler [[Lab X - Containerization with Docker]]>>
[[Home]]
<!--{{{-->
<div class='toolbar' macro='toolbar [[ToolbarCommands::EditToolbar]]'></div>
<div class='title' macro='view title'></div>
<div class="editor">Title</div><div class='editor' macro='edit title'></div>
<div class="editor">Tags</div><div class='editor' macro='edit tags'></div><div class='editorFooter'><span macro='message views.editor.tagPrompt'></span><span macro='tagChooser'></span></div>
<div macro='annotations'></div>
<div class='editor' macro='edit text'></div>
<!--}}}-->
<html>
<center>
  <video id="my-video" class="video-js" controls preload="auto" width="984" height="768" poster="" data-setup="{}">
    <source src="video/FoxyProxy.mp4" type='video/mp4'>
    <p class="vjs-no-js">
      To view this video please enable JavaScript, and consider upgrading to a web browser that
      <a href="http://videojs.com/html5-video-support/" target="_blank">supports HTML5 video</a>
    </p>
  </video>

  <script src="https://vjs.zencdn.net/7.8.2/video.min.js"></script>
</center>
</html>
!! Problem Reports:

If you have a problem, please send me a report I can work with. I need details of the problem, what you tried, steps you took to diagnose it, documentation you reviewed, screenshots, logs, etc. If you send me something vague like "//X command doesn't work//" with no supporting details, there may not be much I can do for you and I will wait for you to follow up your message with meaningful information. 

The level of assistance I provide will be proportionate to your effort to troubleshoot and supply details. If you do nothing to troubleshoot and send me little information to work with, you should expect a similarly minimal level of effort in my response.


!! Time management & workload expectations:

SUNY Poly, as well as most others, [[requires 42.5 hours of work per credit hour|https://www.suny.edu/sunypp/documents.cfm?doc_id=168]].  A four-credit course will thus require 170 hours over the course of our 16 week term, or 10.5 hours per week. Going to college full time is effectively a full time job.  I will be expecting that time commitment each week.

Waiting until the last minute to complete, or even worse, begin, the lab assignments will not be a recipe for success.  Review the tasks early so you have plenty of time to research the problems, seek help in the discussion boards, and get up to speed if you are behind on any prerequisite material. 


!! Grading:

All course deliverables will be collected as PDF documents.  Graded copies of these PDF documents will be returned to you containing my annotations.  If you have questions regarding your grade or my comments, please contact me via email.

My grading is more traditional.  Meeting the bare minimum does not yield an A.  A high grade will require intellectual curiosity, problem-solving abilities, and thorough responses.


Letter grades will be assigned as follows:

| !Percent | !Grade |
| 95% ≥ | A |
| 90% ≥ | A- |
| 87% ≥ | B+ |
| 84% ≥ | B |
| 79% ≥ | B- |
| 77% ≥ | C+ |
| 74% ≥ | C |
| 69% ≥ | C- |
| 67% ≥ | D+ |
| 63% ≥ | D |
| ≤ 62% | F |


!NCS 205 Course Notes

[[Getting Started|Week 0]] - Administrative Tasks & course intro

Jan 16 [[Week 1, Part 1]] - Unix Intro
Jan 18 [[Week 1, Part 2]] - The filesystem
Jan 22 [[Week 2, Part 1]] - Exploring the system 
Jan 24 [[Week 2, Part 2]] - Manipulating Files & Directories
Jan 29 [[Week 3, Part 1]] - Links & File Globbing
Jan 31 [[Week 3, Part 2]] - Home Directories & Shell documentation
Feb 5 [[Week 4, Part 1]] - File Permissions
Feb 7 [[Week 4, Part 2]] - Streams & Redirection, Introduction to filters
Feb 12 [[Week 5, Part 1]] - Filters Continued (awk, sed, & tr)
Feb 14 [[Week 5, Part 2]] - Working with grep
Feb 19 [[Week 6, Part 1]] - Catch up & review
Feb 21 [[Week 6, Part 2]] - I/O practice & Quoting
Feb 26 [[Week 7, Part 1]] - Process management & Job control
Feb 28 [[Week 7, Part 2]] - Substitution
Mar 4 [[Week 8, Part 1]] - Text Editors & Shell Scripting Intro
Mar 6 [[Week 8, Part 2]] - Shell Scripting
Mar 11 [[Week 9, Part 1]] - Version Control
Mar 13 [[Week 9, Part 2]] - Shell Scripting 2 
Mar 18 - Spring Break!
Mar 25 [[Week 11, Part 1]] - Continue scripting work

Extra Material: [[Regular Expressions|Week B]] - Fits into the semester here if you'd like to review this extra-credit content.
Extra Material: [[The Environment|Week C]] - Fits about here too.  Also extra-credit.

Important background material - [[Working more efficiently with GNU screen & SSH keys]] and [[Tunnels & Proxies with SSH]]

Mar 27 [[Week 11, Part 2]] - Basic networking & SSH
Apr 1 [[Week 12, Part 1]] - System Basics - Starting and Stopping, init & run levels, layout of the operating system, system configuration (/etc/ files)
Apr 3 [[Week 12, Part 2]] - Expanding our systems: Working with rpm & yum, installing software from package and source
Apr 8 [[Week 13, Part 1]] - Web services
Apr 10 [[Week 13, Part 2]] - Time & Logging
Apr 15 [[Week 14, Part 1]] - DNS
Apr 17 Week 14, Part 2 - Catch up and review
Apr 22 [[Week 15, Part 1]] - Crypto, Securing communications, & Scheduled tasks
Apr 24 [[Week 15, Part 2]] - Access control and user management

Apr 29 - [[Week 16]] - Finals Week

[[hack5 break-in]] - Completely optional, if you like these little puzzles


!!!Agendas for pages in italics are tentative

Extra Credit Material:
&nbsp;&nbsp; - We don't have time to fit this in, but it's good stuff to know:
* [[Week A|Week 8, Part 1]] - With the {{Command{vi}}} material in the Assignment section
* [[Week B]] - Regular Expressions
* [[Week C]] - The Environment
* [[Week E]] - Storage Systems & LVM
* [[Week F]] - Network File System (NFS)
* [[Week G]] - Backups & disaster recovery
* [[Week H]] - Linux Firewalls 
* [[Virtualization & Containers]]

[img[https://www.ncs205.net/img/1x1.png]]
<html>
<font size="-2">Last Updated: 240505 19:49</font>
</html>
/***
To use, add {{{[[Styles HorizontalMainMenu]]}}} to your StyleSheet tiddler, or you can just paste the CSS in directly. See also HorizontalMainMenu and PageTemplate.
***/
/*{{{*/

#topMenu br {display:none; }
#topMenu { background: #39a; }
#topMenu { float: left; }
#topMenu { width: 90%; }
#topMenu { padding: 2px 0 2px 0; }
#topMenu .button,  #topMenu .tiddlyLink { padding-left:1em; padding-right:1em; color:white; font-size:115%;}
#displayArea { margin: 1em 15.7em 0em 1em; }


#rightMenu {
   float: right;
   background: #39a;
   width: 10%;
   padding: 2px 0 2px 0;
}
#rightMenu .button,  #rightMenu .tiddlyLink { padding-left:1em; padding-right:1em; color:white; font-size:115%;}

/* just in case want some QuickOpenTags in your topMenu */
#topMenu .quickopentag { padding:0px; margin:0px; border:0px; }
#topMenu .quickopentag .tiddlyLink { padding-right:1px; }
#topMenu .quickopentag .button { padding-left:1px; border:0px; }


/*}}}*/
!![[Lab 51 - Bring test and www online]]
Assigned [[Week 11, Part 2]]

* Set a root password for your test and www ~VMs so you are able to log into them via SSH.
* Ensure your test and www ~VMs are online and joined to the lab network.  
** The notes above will help you configure networking
*** Use the hostname {{Monospaced{''www.//username//.ncs205.net''}}} and second IP address in your range for your web server.  
** [[Virtual Machines]] - VM information (Linked on the top menu bar)
** Also complete and submit the [[Lab 51|labs/lab51.pdf]] verification worksheet.  This submitted worksheet will indicate your ~VMs are ready to be evaluated.
* Connect to your ~VMs via SSH from the class shell server

The Proxmox virtual console is a means to access the ~VMs for initial troubleshooting and in case something goes wrong.  Once your ~VMs are online, all work should be done via SSH login.  

You cannot log into these ~VMs directly since they are on private IP addresses (192.168.x.x) behind the class router.  They can only be accessed by first connecting to the class shell server from home.  Use putty (or another SSH client) to connect to the class shell server and then use the {{Command{ssh}}} command to connect to your ~VMs.

{{Warning{Be sure to keep your ~VMs online and do not power them down, else it'll look like the work hasn't been completed when it comes time for grading.}}}
!![[Lab 52 - VM updates & software installation]]
Assigned [[Week 12, Part 2]]

!!! On both ~VMs:
* Update the OS and currently installed software
* Install the following packages via {{Command{ yum}}}:  man wget nc telnet bind-utils openssh-clients rsync bzip2

* Also complete and submit the [[Lab 52|labs/lab52.pdf]] verification worksheet.  This submitted worksheet will indicate your ~VMs are ready to be evaluated.

These packages will also need to be installed on all future ~VMs.  Make a note of it in your documentation.
!![[Lab 53 - Web Server]]
Assigned [[Week 12, Part 2]]

!!! Lab Tasks
<<<
1. Install apache and PHP on your web server
* Directions for this are above

2. Become familiar with apache and its configuration.  
* Check out the config files within {{File{/etc/httpd/conf/}}}
* The file {{File{/etc/httpd/conf/httpd.conf}}} is the main configuration file
* The Linux Administration Chapter 19 (Apache Web Server) may also be a useful resource.

3. Change the Apache {{Monospaced{''~DocumentRoot''}}} directory to {{File{/opt/work/htdocs}}}
* Create the directory {{File{/opt/work/htdocs}}} on your web server VM
* Make a backup of your Apache configuration file
** Always take a backup of a configuration file before making changes.  See the note below.  This way you'll have a known-good copy to refer to if there's any problems.
* Update the apache configuration lines necessary to make this change 
** (you may need to change this path in more than one location within the Apache config)
* Don't forget to restart Apache after changing its configuration file

4. Download the new {{File{index.html}}} file from my web server at 192.168.12.25 to your new Apache {{Monospaced{''~DocumentRoot''}}} directory
* The file {{File{index.html}}} is the default web page delivered to a client (eg: your web browser).  This file must exist in the correct location with correct permissions so your web server can provide content.

5. Ensure your web server is providing the correct website.  The new site should be 6 lines long and include ''Welcome to ~NCS205!'' in the body.
* Check the apache access logs and ensure you are now seeing a proper ''200'' status code for the web requests.
<<<

{{Warning{''Warning:'' It's always wise to make a backup of a configuration file before making changes.  The easiest way to do so is to copy the file with a timestamp appended to the new file name, for example:  {{Command{cp httpd.conf httpd.conf.210322-1522.bak}}}.  This captures the date & time in a way that's easily sortable.  The {{Command{diff}}} command can compare the config file to a backup, showing lines which differ between the two.  Example:  {{Command{diff httpd.conf httpd.conf.210322-1522.bak}}}
}}}

!!! Lab Deliverable

* Also complete and submit the [[Lab 53|labs/lab53.pdf]] worksheet.  This submitted worksheet will indicate your ~VMs are ready to be evaluated.
!![[Lab 54 - Set up MediaWiki]]
Assigned [[Week 13, Part 1]]

Complete the steps on this page to install and configure ~MediaWiki

Install [[MediaWiki|http://www.mediawiki.org/wiki/MediaWiki]] and customize it to your tastes.
* Install ~MariaDB
** Add a wiki user and database
* Download the ~MediaWiki source tarball
** Extract its contents to {{File{/opt/work/htdocs/}}}
** Rename the extracted directory to ''wiki''
* Update php and install dependencies
* Set up a tunnel or proxy to access your wiki
** You can access it by IP address until DNS is online:  http://your_IP/wiki/
** Be sure to replace //your_IP// with proper values.
* Configure ~MediaWiki to fully bring it online

* Be sure you can view the wiki after uploading the {{File{~LocalSettings.php}}} file.  It should look something like this:
[img[img/wiki.png]]


* Also complete and submit the [[Lab 54|labs/lab54.pdf]] verification worksheet.  This submitted worksheet will indicate your ~VMs are ready to be evaluated.
!! [[Lab 55 - Bring core VM online]]
Assigned [[Week 13, Part 2]]

<<<
Bring new core VM online:
* Hostname = {{Monospaced{core.//username//.ncs205.net}}}
* Use the third IP address in your range
* Apply outstanding updates and ensure your VM is running the latest available kernel
** A system reboot may be necessary if the kernel was also updated
* Also complete and submit the [[Lab 55|labs/lab55.pdf]] verification worksheet.  This submitted worksheet will indicate your VM is online and ready to be evaluated for this lab.

Install additional software:
* Standard packages, as previously discussed and recorded in your class notes
* DNS server software.  The package is {{Monospaced{bind-chroot}}}
* Time packages:  {{Monospaced{ntpdate ntp}}}
<<<
!! [[Lab 56 - Time]]
Assigned [[Week 13, Part 2]]

!!! Modify Hosts file:

Add a record similar to the following to the {{File{/etc/hosts}}} file on all of your ~VMs.  Do not remove any lines which may already be in the file.
<<<
192.168.12.26    core core.merantn.ncs205.net ntp.merantn.ncs205.net loghost ntp
<<<
* The IP address should be the address for your core VM
* Replace my username with yours
* These steps won't work if that line is missing or incorrect. 
* Read about the {{File{/etc/hosts}}} file on page 356 in the Linux Administration textbook.  This file is necessary because we don't have DNS running yet.

/%
!!! Remove chronyd

{{Warning{The chronyd NTP service is already installed and running.  This service will prevent the steps laid out in this lab from successfully completing.  Stop and disable the {{Monospaced{chronyd}}} service before proceeding with this lab.}}}
%/

!!! Install NTP services and synchronize time:

Install {{Monospaced{ntp}}} and {{Monospaced{ntpdate}}} on all ~VMs

core VM:  Configure {{Monospaced{ntpd}}} ({{File{/etc/ntp.conf}}}) as a server:
* By default the ntp configuration allows global access to the NTP server.  
** This can easily be abused
** Disable the first restrict directive and lock things down further
*** Comment out this line:  {{Monospaced{restrict default nomodify notrap nopeer noquery}}}
** Insert this line after the restrict you just disabled:  {{Monospaced{restrict default ignore}}}
* Allow your block of 8 IP addresses to communicate with the NTP service running on your core VM
** Add the appropriate restrict directives
** Be sure to include {{Monospaced{nopeer}}} & {{Monospaced{noquery}}} options
** See my config below for examples
* Insert: {{Monospaced{disable monitor}}}
* Synchronize time from the lab ntp server instead of the ~CentOS servers
** lab ntp server: {{Monospaced{ntp.ncs205.net}}}
* Add restrict directives to allow the naemon server full access:
** {{Monospaced{restrict 192.168.12.15 nomodify notrap}}}

test & www VM (and future ~VMs):  Configure {{Monospaced{ntpd}}} ({{File{/etc/ntp.conf}}}) as a client:
* Synchronize time from the ntp service on your core VM instead of the ~CentOS servers
** Use the hostname {{Monospaced{ntp.//username//.ncs205.net}}} instead of IP addresses
** This hostname should resolve due to the entry you just added to the {{File{/etc/hosts}}} file.  Test it with ping.

My Configs for reference (click the yellow box to expand them):
* Be sure to change host names and IP addresses appropriately:
* +++[My NTP Server] 
{{Monospaced{core# }}} {{Command{grep -v ^# /etc/ntp.conf | uniq}}}
{{{
driftfile /var/lib/ntp/drift

restrict default ignore

restrict 127.0.0.1 
restrict ::1

# Allow your range of 8 IPs to access the NTP server.  Replace my starting IP (24) with yours.
restrict 192.168.12.24 mask 255.255.255.248 nomodify notrap nopeer noquery
# Allow nagios server to check status of NTP
restrict 192.168.12.10 nomodify notrap
restrict 192.168.12.15 nomodify notrap

server ntp.ncs205.net iburst
restrict ntp.ncs205.net notrap nopeer noquery

disable monitor

includefile /etc/ntp/crypto/pw

keys /etc/ntp/keys
}}}
===

* +++[My NTP Clients]
{{Monospaced{www# }}} {{Command{grep -v ^# /etc/ntp.conf | uniq}}}
{{{
driftfile /var/lib/ntp/drift

restrict default ignore

restrict 127.0.0.1 
restrict -6 ::1

server ntp.merantn.ncs205.net
restrict ntp.merantn.ncs205.net notrap nopeer noquery

includefile /etc/ntp/crypto/pw

keys /etc/ntp/keys
}}}
===


!!! Start & enable ntpd
All ~VMs (current and future):
* Set ntpd and ntpdate to start on boot on all ~VMs
* Start the ntpd service now on all ~VMs


!!!! Verify it is working and time is being synchronized properly:

My core VM, an NTP server:
{{{
[root@core ~]# ntpstat
synchronised to NTP server (192.168.12.15) at stratum 3
   time correct to within 56 ms
   polling server every 1024 s

[root@core ~]# ntpq -p
     remote           refid      st t when poll reach   delay   offset  jitter
==============================================================================
*192.168.12.15    78.46.60.40    3 u   41   64  377    0.477  -18.800  13.181
}}}

My www server, an NTP client:

It took a few minutes after starting the services for the clock to synchronize:
{{{
[root@www ~]# ntpstat
unsynchronised
  time server re-starting
   polling server every 8 s
}}}

Eventually it did and I saw this:
{{{
[root@www ~]# ntpstat
synchronised to NTP server (192.168.12.26) at stratum 4
   time correct to within 232 ms
   polling server every 64 s

[root@www ~]# ntpq
ntpq> peers
     remote           refid      st t when poll reach   delay   offset  jitter
==============================================================================
*core            192.168.12.15     4 u   55   64  377    0.492  -35.066  62.617
}}}


* Also complete and submit the [[Lab 56|labs/lab56.pdf]] verification worksheet.  This submitted worksheet will indicate your ~VMs are ready to be evaluated for this lab.


!!! Troubleshooting

Time synchronization with ntpd doesn't happen immediately.  The service needs some time to build trust in its upstream time provider so that it will use it as a time source.  Be sure to allow at least a 30 minute delay after starting the services for this trust to be established before reporting issues.

If you are having difficulty getting time to synchronize, the following commands may help direct you to the root cause:

* {{Command{systemctl status -l ntpd}}}
* {{Command{ntpstat}}}
* {{Command{ntpq -p}}}
* {{Command{echo associations | ntpq}}}
* {{Command{cat /etc/hosts}}}
* {{Command{ps aux | grep ntp}}}

Any requests for help in the discussion boards should include output from the above commands for both your NTP server and any impacted NTP clients along with their ntp configuration file.  A copy/paste of the text into the discussion boards is easier to work with and highlight issues than simple screenshots.  Be sure to include the command & shell prompt with any output included.  ''Do not'' only include output without the shell prompt and command which obtained that output.
!! [[Lab 57 - Logging]]
Assigned [[Week 13, Part 2]]

!!! Modify Hosts file:

Be sure a record similar to the following exists in the file {{File{/etc/hosts}}} file on all of your ~VMs.  This should have been completed in the previous lab.
<<<
{{Monospaced{192.168.12.26    core core.merantn.ncs205.net ntp.merantn.ncs205.net loghost ntp}}}
<<<
* The IP address should be the address for your core VM
* Replace my username with yours
* These steps won't work if that line is missing or incorrect. 

!!! syslog:

!!!! core VM:  
* configure syslog to receive log information from other hosts

On your core VM, find these lines at the top of the file {{File{/etc/rsyslog.conf}}}:
{{{
# Provides UDP syslog reception
#$ModLoad imudp
#$UDPServerRun 514
}}}

Remove the comments from the bottom 2 lines (the first is actually a comment and should remain so).

!!!! www VM:
* configure syslog to also send log information to the core VM

On your www VM, find this line at the bottom of the file {{File{/etc/rsyslog.conf}}}:

{{{
#*.* @@remote-host:514
}}}
* Remove the comment at the beginning of the line
* Change the double &#064;@ to single @  (A single @ means to use UDP)
* Change ''remote-host'' to ''loghost''  
** ''loghost'' is an alias for our core VM.
** It's handy to use aliases like this in case we need to move our log destination.  We can then easily change the alias to point to a different system.  This isn't so convenient when our systems are defined in the {{File{/etc/hosts}}} file, but easy once DNS is in place.

Experiment with logging.  Investigate the logging commands and the log files within the directory {{File{/var/log/}}}.


!!! Also complete and submit the [[Lab 57|labs/lab57.pdf]] verification worksheet.  This submitted worksheet will indicate your VM is online and ready to be evaluated for this lab.
!! [[Lab 58 - Working with logs]]
Assigned [[Week 13, Part 2]]

This lab provides a brief refresher on working with text files and an introduction to using your syslog logs to investigate issues on your systems.

Automated brute-force password attacks are a common attack vector on the open internet.  An attacker will scan large blocks of IP addresses or the entire internet trying to log into a service using common usernames and passwords.  SSH is a common service to attack since most Unix/Linux systems are running it by default.

There are two primary types of brute-force password attacks:
* [[Password Guessing|https://attack.mitre.org/techniques/T1110/001/]] - //Adversaries with no prior knowledge of legitimate credentials within the system or environment may guess passwords to attempt access to accounts. Without knowledge of the password for an account, an adversary may opt to systematically guess a password using a repetitive or iterative mechanism.  If usernames are also unknown, common usernames may be also be guessed such as root or standard first names.//
* [[Password Spraying|https://attack.mitre.org/techniques/T1110/003/]] - //Adversaries may use a single or small list of commonly used passwords against many different accounts to attempt to acquire valid account credentials. Password spraying uses one password (e.g. 'Password01'), or a small list of commonly used passwords, that may match the complexity policy of the domain.//

Tools like [[hydra|https://www.kali.org/tools/hydra/]] and [[ncrack|https://nmap.org/ncrack/]] along with password lists like [[RockYou|https://github.com/zacheller/rockyou]] ([[RockYou Background|https://en.wikipedia.org/wiki/RockYou#Data_breach]]) facilitate these types of attacks.

A few security best-practices are implemented in our lab environment to thwart these types of attacks:
* Our lab ~VMs are protected behind the class router and cannot be accessed from the internet, so they cannot be remotely scanned by attackers.  This is why you have to access everything through the class shell server - a single choke point is much easier to monitor and control than a wide attack surface.
* The class shell server listens on an alternate SSH port so attackers cannot easily find it.  This also keeps down noise in the logs so real events are easier to see.
* We use Campus usernames, which are cryptic, instead of common names that can be easily guessed.  
* The administrator account common to all Unix/Linux systems, {{Monospaced{root}}}, cannot log in to the class shell server via SSH.  Everyone with privileges must first log in as a normal user and then switch to {{Monospaced{root}}}.  This also improves accountability for authorized users.
* Countermeasures are configured to detect and block rapid failed login attempts to the class shell server.  Even if someone finds our alternate port, they won't be allowed to just beat on the server with failed login attempts.

//Lateral movement// refers to the technique used by attackers to progressively move through a network, gaining access to different systems or resources. Once attackers have initially breached a network, they aim to expand their influence, moving laterally from one system to another, seeking valuable information or increasing their control over the network.  An attacker will try from within to compromise additional systems that were not initially accessible from afar.

Your lab ~VMs cannot be attacked from the internet, but they can easily be attacked from within our lab network.  A brute force SSH attack was launched against your core VM.  Identify the system which launched the attack and any impacted user accounts.

!!! Complete and submit [[Lab 58|labs/lab58.pdf]].  
* This lab will be due ''April 17''.

!! [[Lab 59 - Bind config and zones]]
Assigned [[Week 14, Part 1]]

Our end goal is for everyone to configure their own authoritative name servers for their lab networks.  I own the domain ncs205.net and manage its authoritative name server.  I am delegating control of the subdomain //username//.ncs205.net to each of you.  You will set up the master DNS server for this zone on your core VM.  The DNS server ns5.ncs205.net is running within the lab network.  It is a slave for your //username//.ncs205.net zone.  After you make changes to your DNS server, you will signal to this slave that you have records ready for it.  It will then perform a zone transfer to obtain your DNS records.  Once this zone transfer is complete, the DNS records for your //username//.ncs205.net domain will be globally available.

Begin to configure the DNS server on your core VM 

* Also complete [[Lab 59|labs/lab59.pdf]] and submit this PDF after your DNS server is online.  This submitted worksheet will indicate your ~VMs are ready to be evaluated for this lab.


!!! Configuring Bind

Lets stand up our authoritative name server

On your core VM: Install the packages:  {{Monospaced{bind-chroot bind-utils}}}
* bind runs [[chrooted|https://www.journaldev.com/38044/chroot-command-in-linux]] to protect the system.  This security measure isolates the service from the rest of the OS, so an attacker cannot access the rest of the system if they compromise the DNS service.

Edit its config file, {{File{/etc/named.conf}}}

{{Warning{''Note:'' You'll see my username and IP addresses in the configurations below.  Be sure to replace my values with yours.}}}


These options should be set within the //options// section of {{File{named.conf}}}.  Some options already exist, so make sure their values match what's below.  Add any which are not already present.
{{{
	listen-on port 53 { any; };

	allow-query     { any; };
	allow-recursion { ncs205; };
        forwarders { 192.168.12.10; };

}}}

!!! Access control lists:

Add an access control list to the top of the file, after the options section.  This ACL will limit who can query your name server.
{{{
acl "ncs205" {
	127.0.0.1;
	localhost;
        192.168.12.24/29;
};
}}}
This is just an example.  ''24'' is my starting IP address.  Replace ''24'' with your starting IP address.


!!! Forwarders:
The name server will forward any query it can't answer locally (from authoritative zone data or from cache) to the forwarder.
Forwarders are queried in the listed order until an answer is received.  The forwarding IP address above is the name server for our lab ~VMs.
(This was already set above)


!!! Starting bind

It's always a good idea to verify your configuration before restarting the service.  DNS is a critical service.  If you issue a service restart and your configuration is invalid, you'll experience downtime while you sort out the problem.

* Verify the configuration:  {{Command{named-checkconf  /etc/named.conf}}}
** No output will be returned if the configuration file is valid.  Correct any errors which were displayed.

* Start the {{Monospaced{named-chroot}}} service now.
** Be sure to reload the service whenever there's a configuration change.
* Set this service to also start at system boot

{{Warning{''Warning: '' Two services are available - ''named'' and ''named-chroot''.  The two will conflict with each other.  Be sure you're referring to the ''{{Monospaced{named-chroot}}}'' service when you need to enable, start, stop, or restart bind.}}}

Verify the service is working.  You should be able to look up a DNS record by querying your new DNS server.  The +noall +answer options provide an abbreviated output and do not always need to be used.
{{{
[root@core ~]# dig www.google.com @localhost +noall +answer

; <<>> DiG 9.11.4-P2-RedHat-9.11.4-9.P2.el7 <<>> www.google.com @localhost +noall +answer
;; global options: +cmd
www.google.com.         63      IN      A       172.217.3.100
}}}
It might take a few minutes after the service is started/restarted to be able to query external records.  It might not work right away.  And it might return a different record than the one above.


!!! Defining zones:

Lets put our zone definitions at the bottom of  {{File{/etc/named.conf}}}.  Be sure to replace my username with yours:

!!!! Forward zone:
This configuration block creates and defines your forward DNS zone:
{{{
zone "merantn.ncs205.net" {
        type master;
        file "/etc/named/master/merantn.ncs205.net.fwd";
};
}}}

!!! Creating Zone files:

* Create your forward and reverse zone files.  Be sure to include:
** A default TTL
*** This must be the first line of the zone file.  See below for an example.
** A SOA record
** NS records for your primary and slave DNS servers
** A and PTR records for each of your hosts (so far we have 3 ~VMs: test, web, and core)
*** Be sure to note that A and PTR records do not belong in the same zone file
** CNAME records for host name __directory__, __ntp__, and __loghost__ pointing to your core VM


Here is my complete forward zone file.  Save this to {{File{/etc/named/master///username//.ncs205.net.fwd}}}
 - You may need to create the {{File{master}}} directory.
 - Replace usernames and IP addresses as necessary.

{{{
$TTL 5m
@ IN  SOA ns1.merantn.ncs205.net. hostmaster.merantn.ncs205.net. (
 2023112200    ; serial number
 1d    ; refresh
 5d    ; retry
 2w    ; expire
 30m    ; minimum
)
                IN  NS  ns1.merantn.ncs205.net.
                IN  NS  ns5.ncs205.net.

ns1             IN      A       192.168.12.26

test            IN      A       192.168.12.24
www             IN      A       192.168.12.25
core            IN      A       192.168.12.26

loghost         IN      CNAME   core
ntp             IN      CNAME   core
directory       IN      CNAME   core
}}}


!!! Allow zone transfers to the slave

We need to grant permission for the slave DNS server to perform a zone transfer against your server.

On your core vm, add a new ACL to the top of  {{File{/etc/named.conf}}}, after the options section:
{{{
acl "slaves" {
        127.0.0.1;              // allow transfer from localhost for testing
	192.168.12.10;		// allow the secondary authoritative name server to perform zone transfers
	192.168.12.x;		// allow your core system to perform zone transfers.  Replace x with the IP address of your core VM
};
}}}

and add this statement to the options section:
{{{
	allow-transfer  { slaves; };
}}}

You can verify your zone configuration with {{Command{named-checkzone //zonename// //filename//}}}
 - eg:  {{Command{named-checkzone merantn.ncs205.net /etc/named/master/merantn.ncs205.net.fwd}}}
 - again, if you make changes that break your zone file and restart the service, you'll have a critical outage.  Verify your configs first!

Restart bind to reload the configuration and test your server:

Be sure to replace my username with yours.
{{{
[root@core ~]# dig www.merantn.ncs205.net @localhost

; <<>> DiG 9.11.4-P2-RedHat-9.11.4-9.P2.el7 <<>> www.merantn.ncs205.net @localhost
;; global options: +cmd
;; Got answer:
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 59402
;; flags: qr aa rd ra; QUERY: 1, ANSWER: 1, AUTHORITY: 2, ADDITIONAL: 2

;; OPT PSEUDOSECTION:
; EDNS: version: 0, flags:; udp: 4096
;; QUESTION SECTION:
;www.merantn.ncs205.net.                IN      A

;; ANSWER SECTION:
www.merantn.ncs205.net. 300     IN      A       192.168.12.25

;; AUTHORITY SECTION:
merantn.ncs205.net.     300     IN      NS      ns1.merantn.ncs205.net.
merantn.ncs205.net.     300     IN      NS      ns5.ncs205.net.

;; ADDITIONAL SECTION:
ns1.merantn.ncs205.net. 300     IN      A       192.168.12.26

;; Query time: 0 msec
;; SERVER: ::1#53(::1)
;; WHEN: Sun Apr 12 02:16:37 EDT 2020
;; MSG SIZE  rcvd: 119
}}}


!!! Configure the slave  (I already did this for each of you)

On ns5.ncs205.net (the slave I manage), I need to add a configuration block similar to the following to named.conf for each student network in the class.  This will configure that server to become a slave for your zones:
{{{
zone "merantn.ncs205.net" {
        type slave;
        file "slave/ncs205.net/merantn";
        masters { 192.168.12.26; };
};
}}}

{{Note{''Note:'' This zone block above is just for your information.  Do not place it into your zone file.  It's purely to demonstrate what needs to be added to ns5, the bridge between your DNS service on your core VM and the outside world.}}}


!!! Verification:

1. Once your zone file is published, you can check the directory {{File{/opt/pub/ncs205/zones/}}} on the class shell server.  You should see a file in that directory that matches your username.
2. You should be able to run the dig command on the shell server for one of your hosts, eg:  {{Command{dig www.merantn.ncs205.net}}}
3. You should be able to run a DNS lookup on your home system for one of your hosts, eg:  {{Command{nslookup www.merantn.ncs205.net}}}
4. The system log {{File{/var/log/messages}}} on your core VM may contain useful ~DNS-related messages.

Testing from my home Windows PC:
{{{
Microsoft Windows [Version 6.1.7601]
Copyright (c) 2009 Microsoft Corporation.  All rights reserved.

C:\Users\nick>nslookup www.merantn.ncs205.net
Server:  one.one.one.one
Address:  1.1.1.1

Non-authoritative answer:
Name:    www.merantn.ncs205.net
Address:  192.168.12.25
}}}

If all of these check out, congrats - your DNS zones are now globally accessible!


!!! DNS client configuration

Once your DNS server is working, modify {{File{/etc/resolv.conf}}} on each of your ~VMs to add your new nameserver and expand the search domains
* We want to query our nameserver first and the lab DNS server second
* We want to search our domain first

Here's mine:
{{{
[root@core ~]# cat /etc/resolv.conf
nameserver 192.168.12.26
nameserver 192.168.12.10
search merantn.ncs205.net ncs205.net
}}}

The //search// keyword in the {{File{resolv.conf}}} is a list of domains to add to unqualified host names.  If you don't specify a fully qualified host name, then those domains will be appended in order until a match is found.  This lets us save some typing and refer to our hosts by their unqualified name instead of having to type out the full domain name each time.

Unqualified:  Just the host name, eg: //core//
Fully qualified domain name:  //core.merantn.ncs205.net//

You can access your ~VMs by host name from anywhere within the lab network once you have DNS set up.

The //ncs205.net// domain is also in the search string on the class shell server.  This means you can easily access your ~VMs from the shell server by entering //host//.//username//.

For example:
{{{
[merantn@shell ~]$ cat /etc/resolv.conf
nameserver 192.168.12.10
search ncs205.net

[merantn@shell ~]$ ssh core.merantn -l root
root@core.merantn's password:
Last login: Sun Apr 12 13:15:11 2020 from 192.168.12.10
[root@core ~]#
}}}


!! Misc DNS topics

Don't forget to update your serial number after making changes!
- Notifications are broadcast to slave NS only if the serial number increments
- Records may be stale if you forget to increment the serial

You can use the command {{Command{rndc reload}}} to refresh your DNS zones after updates are made.
!! [[Lab 60 - SSL Certificates]]
Assigned [[Week 15, Part 1]]

Follow the directions above to add SSL encryption support to your web server.

Also complete and submit the [[Lab 60|labs/lab60.pdf]] verification worksheet.  This submitted worksheet will indicate your ~VMs are ready to be evaluated for this lab.
!! [[Lab 60 - Scheduled Tasks]]
Assigned [[Week 13, Part 1]]


!!! SSL Certificate renewal

Our lab web server SSL certificate will expire in 90 days.  This isn't much of an issue for us because class will have ended and this server will be decommissioned by then.

But if we were doing this for real, renewing that SSL certificate would be a task we would need to account for.  SSL certificates created with the {{Command{acme.sh}}} tool can be renewed by running the command with the {{Monospaced{&#45;-cron}}} option.  Any certs in its configuration will be checked for upcoming expiration and automatically renewed if they are about to expire.

We don't want to worry about running this manually and potentially forgetting about it.  We can instead use cron to run this command for us at a set interval.


!!! Cron:

Create the following cron task on your www VM:

* Schedule {{Command{acme.sh &#45;-cron}}} to run every other day at 6pm.
* Save your scheduled job to the file {{File{/etc/cron.d/acme}}}

Also complete and submit the [[Lab 60|labs/lab60.pdf]] verification worksheet.  This submitted worksheet will indicate your ~VMs are ready to be evaluated for this lab.
* ''Note:'' Lab 60 is a late addition to the semester, so it's getting a high number.  
!! [[Lab 61 - Scheduled Tasks]]
Assigned [[Week 15, Part 1]]


!!! SSL Certificate renewal

Our lab web server SSL certificate will expire in 90 days.  This isn't much of an issue for us because class will have ended and this server will be decommissioned by then.

But if we were doing this for real, renewing that SSL certificate would be a task we would need to account for.  SSL certificates created with the {{Command{acme.sh}}} tool can be renewed by running the command with the {{Monospaced{&#45;-cron}}} option.  Any certs in its configuration will be checked for upcoming expiration and automatically renewed if they are about to expire.

We don't want to worry about running this manually and potentially forgetting about it.  We can instead use cron to run this command for us at a set interval.


!!! Cron:

Create the following cron task on your www VM:

* Schedule {{Command{acme.sh &#45;-cron}}} to run every other day at 6pm.
* Save your scheduled job to the file {{File{/etc/cron.d/acme}}}

Also complete and submit the [[Lab 61|labs/lab61.pdf]] verification worksheet.  This submitted worksheet will indicate your ~VMs are ready to be evaluated for this lab.
!! [[Lab 62 - VM Lockdown - Secure your VMs]]
Assigned [[Week 15, Part 2]]

!!! Add user accounts to all ~VMs

Add two local user accounts to your ~VMs
* First account - set the user name to your campus username
** UID = 1000
** GID = 100
** Set a valid password
** Create a home directory within {{File{/home/}}}
** Copy the environment configuration files from {{File{/etc/skel/}}} to the new home directory
* Second account - username = merantn
** UID = 7289
** GID = 100
** Create a home directory within {{File{/home/}}}
** Copy the environment configuration files from {{File{/etc/skel/}}} to the new home directory
** Copy my SSH public key (see below) to the user's {{File{~/.ssh/authorized_keys}}} file
*** You will likely need to create the directory and file
*** Be sure you understand how SSH key-based authentication works.
** Use this password hash (encrypted password):  
{{{
$6$nmUnix22$FCHlRIf.MFckb664yGEMGIC09cxfIk6NO/6fz/ou5EBbLQuo5.J0.szsg7aRswSIvxVjPGYWhiQ2XKD62eg4Y0
}}}

{{Warning{''Warning:'' Many students seem to have difficulty setting my hash correctly.  These lab verification worksheets also provide a good opportunity to validate your work.  Pay close attention to the output of Question 5 to ensure my hash is properly displayed.}}}


* Verify permissions:
** Both user's home directories and all files below them must be owned by the user and GID 100
** The user's home directory must have proper directory permissions - it must not be writable by the group or others for proper SSH function.
* Verify ~SELinux
** ~SELinux must be disabled for SSH public key authentication to function properly
** Edit {{File{/etc/selinux/config}}} and change {{Monospaced{enforcing}}} to {{Monospaced{disabled}}} on line #7 to disable ~SELinux on system startup
** Execute {{Command{setenforce 0}}} to disable ~SELinux for the current boot
** This may already have been completed.

My SSH public key
{{{
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBVLQcFklXcim/xylMML4QnLy4iuzrdgOUWivktOAlNX merantn@shell.ncs205.net
}}}

{{Note{''Note:'' You can test logins to your ~VMs using my user account by creating your own SSH keypair and adding your SSH public key to the {{File{~/.ssh/authorized_keys}}} file in my home directory on your VM.  See the directions in [[Working more efficiently with GNU screen & SSH keys]] for how to create an SSH keypair.  The {{File{authorized_keys}}} file can contain multiple public keys.  Any of the corresponding private keys will be accepted for login.}}}


!!! Disable direct root login via SSH on all ~VMs

# Adjust the sshd configuration to disable direct root logins.  All users must first login as a regular, unprivileged user and then elevate privileges.  
** Look for the {{Monospaced{~PermitRootLogin}}} configuration option in {{File{/etc/ssh/sshd_config}}}
# Adjust PAM to require wheel group membership in order to su to root
** Look in {{File{/etc/pam.d/su}}}
# Don't forget to add both user accounts to the wheel group

{{Warning{''Warning:'' When messing with authentication, it's always wise to verify everything works before logging out.  Open a new putty window, ssh in, and elevate up to a root prompt before disconnecting from your original putty session.  Otherwise, if you log out and something is broken, you may have difficulty accessing the system.}}}

!!! Verification Worksheet

Also complete and submit the [[Lab 62|labs/lab62.pdf]] verification worksheet.  This submitted worksheet will indicate your ~VMs are ready to be evaluated for this lab.
!![[Lab 63 - sudo]]:  apache configuration access
Assigned [[Week 15, Part 2]]

We would like to provide the webmaster the ability to update the apache configuration and restart the service on the web server virtual machine without granting full root level access.  The {{Command{sudo}}} and {{Command{sudoedit}}} utilities can be used to accomplish this.

!!! Create a webmaster user on your web server VM
* username = wes
* uid = 2000
* gid = 100
* Fully configure the environment for this user
* Use the password hash {{{$6$IamWes205$JGDoWDKSXqdeHfFXYrPvG6vZQayMPMaCA8p/NlMFf1.Pu.mnHHh18H38B7HOxt2X3Z5dwiyS9jwYYXPHZJamt0}}}

!!! Create a new group for the webmasters
* group name = webmaster
* gid = 1000
* add wes to this group

!!! Configure {{Command{sudo}}} / {{Command{sudoedit}}} to:
# Grant the user ''wes'' the ability to edit the primary apache configuration file
# Grant the user ''wes'' the ability to execute the {{Command{apachectl}}} command as root.

{{Warning{''Warning:'' Be sure you understand why {{Command{sudoedit}}} is used for modifying root-owned files instead of just {{Command{sudo}}} followed by an editor, eg: {{Command{sudo vi /etc/httpd/conf/httpd.conf}}}. }}}


!!! Verification Worksheet

Also complete and submit the [[Lab 63|labs/lab63.pdf]] verification worksheet.  This submitted worksheet will indicate your VM is ready to be evaluated for this lab.
!! [[Lab 64 - Enable Two-Factor Authentication]]
Assigned [[Week 15, Part 2]]

Passwords are increasingly proven to be insufficient as the sole means of authentication.  Passwords are too easily phished, captured via shoulder surfing or key loggers, or stolen from data breaches.  We also generally do a poor job of selecting passwords.  Password effectiveness is greatly reduced due to reuse across multiple sites and the selection of poor quality, weak passwords.  Strong, secure passwords should be unique and contain a minimum of 12 random characters across the full alphabetic, numeric, and symbol character space.  This then makes them difficult to remember.

These shortcomings can be mitigated with the use of multifactor authentication.  Utilizing a hardware token is ideal.  Google recently [[made the news|https://krebsonsecurity.com/2018/07/google-security-keys-neutralized-employee-phishing/]] for introducing hardware tokens for their employees to access corporate resources with great success.  The Google-made [[Titan Security Key|https://cloud.google.com/titan-security-key/]] is now available for general purchase.  [[YubiKeys|https://www.yubico.com/store/]] are another popular alternative for general use.  Such keys can easily be used to add multi-factor authentication to operating system logins, services, or web sites after these systems are enabled to support hardware tokens.

Soft tokens are available as a free alternative to hardware tokens.  A soft token is a desktop or mobile application which generates a one-time pin which can be entered along with a password to prove identity.  Instead of a token on your keychain, your desktop or phone becomes "something you have".  Multi-factor authentication should be used for any services where a higher level of security is warranted due to an increased exposure to attack.

Google Authenticator ([[Android|https://play.google.com/store/apps/details?id=com.google.android.apps.authenticator2&hl=en_US]] or [[Apple|https://itunes.apple.com/us/app/google-authenticator/id388497605?mt=8]]) is a popular soft token with wide support.  The Google Authenticator can be used as a second factor for ssh authentication to Linux servers.  

If not for our class's virtual lab infrastructure protecting us from the outside world, our class ~VMs would all otherwise be exposed to the internet and open to attack.  Any externally accessible server with such an increased exposure to attack would necessitate the deployment of multi-factor authentication.

!!! We will now implement two-factor authentication using Google Authenticator for access to our core VM.

{{Note{''Note:''  Two-factor authentication with the Google Authenticator will be set up for your regular user, not the root account.}}}

* Ensure your user account exists on your core VM and you are able to authenticate with a password.
** None of this will work if your user account is not fully functional
* Get started by installing the Google Authenticator app on your phone.  
* We must next generate a barcode or key to add to the Google Authenticator App.  
** Log in to your core VM via SSH and elevate to root
*** Ensure the {{Monospaced{epel-release}}} package is installed on your core VM
*** Install the {{Monospaced{google-authenticator}}} package on your core VM
** Exit the root login and log in to your core VM as a regular user
*** Run the command {{Command{google-authenticator}}} to initialize the token
*** Answer ''y'' to the question: ''Do you want authentication tokens to be time-based (y/n)''

You will be presented with a QR code to scan from the Google Authenticator app on your phone along with a secret key and a series of emergency scratch codes.  The secret key can be used to add this account to the Google Authenticator in case you are unable to scan the barcode.  Emergency scratch codes should be stored somewhere safe and are used to authenticate in case you lose your phone.
** Save the secret Key.  We'll need it later.

Next, on your phone, launch the Google Authenticator app and choose the option to scan a barcode or enter a key and provide the appropriate input.

[img[img/googleauth1.png]]

Return to your VM and answer the remaining questions:

{{Monospaced{Do you want me to update your "/home/merantn/.google_authenticator" file? (y/n) ''y''}}}

{{Monospaced{Do you want to disallow multiple uses of the same authentication token? This restricts you to one login about every 30s, but it increases your chances to notice or even prevent man-in-the-middle attacks (y/n) ''y''}}}

{{Monospaced{By default, a new token is generated every 30 seconds by the mobile app. In order to compensate for possible time-skew between the client and the server, we allow an extra token before and after the current time. This allows for a time skew of up to 30 seconds between authentication server and client. If you experience problems with poor time synchronization, you can increase the window from its default size of 3 permitted codes (one previous code, the current code, the next code) to 17 permitted codes (the 8 previous codes, the current
code, and the 8 next codes). This will permit for a time skew of up to 4 minutes between client and server. Do you want to do so? (y/n) ''n''}}}

{{Monospaced{If the computer that you are logging into isn't hardened against brute-force login attempts, you can enable rate-limiting for the authentication module. By default, this limits attackers to no more than 3 login attempts every 30s. Do you want to enable rate-limiting? (y/n) ''n''}}}

{{Warning{''Warning:'' Answering no to the last question is a poor security choice.  If we were implementing this in a production environment we would answer yes to enable rate-limiting.  We are only answering no because we are testing something new and do not want to lock ourselves out in the process.}}}

The file {{File{~/.google_authenticator}}} will contain your 2FA configuration.

You should now have the Google Authenticator app installed on your phone and an account configured for use.  Next we must configure the operating system to require this second form of authentication for SSH logins.  We will not modify the configuration for Console logins, so if things go wrong we can always log in through the Proxmox console to fix it.

!!! Configure the core server to require two-factor authentication

* Escalate to root privileges

* Edit the file {{File{/etc/pam.d/sshd}}}  and add the following line to the bottom:

{{{
auth required pam_google_authenticator.so nullok
}}}

* Edit the file {{File{/etc/ssh/sshd_config}}} and search for //~ChallengeResponseAuthentication// .  Ensure the value is set to ''yes'':

{{{
ChallengeResponseAuthentication yes
}}}

* Save and close the file, then restart the sshd service:


Finally, ''without logging out'', attempt to log in to your core VM from itself.  Launch the Google Authenticator App to generate a new token.  When making changes to remote connection services, we do not want to log out until we can verify those changes are functioning properly.  If we disconnect and something went wrong, we might end up locked out!

[img[img/googleauth2.jpg]]

Logging in with two-factor authentication:
{{{
[merantn@core ~]$ ssh localhost -l merantn
Password:
Verification code:
Last login: Sun Apr 19 00:22:40 2020 from localhost
[merantn@core ~]$
}}}

With the Google Authenticator changes in place, I'm prompted for my password as usual along with the verification code from the Authenticator App.  ''Note:'' Each code is valid only once to prevent replay attacks.  Once you log in, you may need to wait up to 30 seconds for a new code to be generated before you can log in again.


!!! Verification Worksheet

Also complete and submit the [[Lab 64|labs/lab64.pdf]] verification worksheet.  This submitted worksheet will indicate your ~VMs are ready to be evaluated for this lab.
Mastery of this subject material will only come with practice. To that end, this will be a very hands-on and practical course. Expect graded lab assignments regularly to provide ample practice with the assigned material.  Properly completing lab assignments on time is necessary to receive a good grade for this course. Not completing lab assignments at all will likely result in a failing grade.

Any deliverables will be collected for review no sooner than their due date. Late assignments may be accepted, subject to time-dependent grade penalty of up to 50%. Presentation of submitted assignments will also impact grade.

{{Note{''Note:'' It is much better to have correct work submitted late than obviously wrong or incomplete work submitted on time.  If you're having trouble with some of the material and need more time, please let me know and we can discuss adjusting due dates.  Submitting poor quality work to meet a due date is not a wise professional strategy and will receive harsher grading.}}}


!! Submitting Homework Assignments
Homework assignments are to be uploaded to the class shell server using a file transfer program like ~WinSCP and saved to the directory {{File{/opt/pub/ncs205/submit/}}}. I will then grade/annotate your work and return the files to you for review. Most homework assignments will be PDF forms to complete. Download the lab PDF and open it in [[Acrobat Reader|https://get.adobe.com/reader/]].  ''Be careful using the PDF viewer in your web browser''.  Some browsers will not properly save the file and you will upload a blank document.  Grades will be posted to Brightspace.

After downloading the PDF assignment and opening the file in [[Acrobat Reader|https://get.adobe.com/reader/]], add your name to the top, fill in your responses, then save & close the file.  It would be wise to reopen the PDF in Acrobat Reader to make sure everything saved correctly before uploading to the server.  You should be in the habit of verifying your work before submitting it.

Files must be named appropriately so we don't have filename collisions among everyone's uploaded files. Rename your PDF document following this naming convention: {{File{''ncs205-lab#-username.pdf''}}}
* replace # with the lab number
* replace username with your campus username

Uploaded labs ''must'' contain your name at the top of the document and their file names ''must'' follow this file name format __exactly__ in order to be graded. This includes case - all letters must be lowercase. The Unix operating systems are case sensitive, so {{File{~NCS205-lab1-jdoe12.pdf}}} is a different file than {{File{ncs205-lab1-jdoe12.pdf}}}.  The former would not be accepted for review.

{{Warning{''Warning:'' The Microsoft Windows operating system hides file extensions by default.  This is a terrible setting for a security practitioner and should be disabled.  A common mistake is to fail to take this into account and upload files with a double extension, such as {{File{ncs205-lab1-jdoe12.pdf.pdf}}}.  This file would not be named correctly and thus not accepted for review.}}}

!! How to upload your lab assignments:
--A video will be posted here demonstrating the process in the coming days.--  Please let me know if you have trouble figuring this out.

!! Late Penalties
Point penalties for late lab assignments will be assessed as follows:

|!Penalty|!Condition|
| 0 |Sneak it in past the due date but before I grade the labs|
| 10% |Submitted after the batch has been graded|
| 20% |Submitted after graded labs have been returned|
| 30% |Submitted after we've reviewed a lab|
| 40% |Submitted after I've posted a review video or we've held an online meeting to discuss a lab.|

{{Warning{''Note:'' Labs 1 through 25 will not be accepted after the last date to Withdraw from the course unless prior approval is obtained.}}}

!! Common point deductions

!!! {{Command{cat}}} abuse

It is common for new students to abuse the {{Command{cat}}} command and use it unnecessarily with command strings like {{Command{ cat //file// | grep //string// }}}.  In this example, the {{Command{grep}}} command will accept a filename argument and should be represented as {{Command{grep //string// //file//}}}.  This seems to be a common bad habit to break.  Abusing the {{Command{cat}}} command and using it where it provides no value will result in a 10% point penalty per lab.


!! The grading workflow
# You upload a completed lab PDF to {{File{/opt/pub/ncs205/submit/}}} on the class shell server
# Every hour a script will collect new lab submissions which are properly named and copy them to the grading queue, {{File{/opt/pub/ncs205/queue/}}}.
## An accepted lab will be moved from the {{File{submit/}}} directory to the directory {{File{/opt/pub/ncs205/submit/collected/}}}
## Any improperly named files will not be accepted and remain in the {{File{submit/}}} directory for one week.  They will then be moved to {{File{/opt/pub/ncs205/submit/invalid/}}}
## ''Note:'' The collection script may occasionally be manually executed between the scheduled hourly runs
# The grading queue will be synchronized to my tablet for review.
# Any annotations will be recorded and synchronized back to the shell server, saved to the directory {{File{/opt/pub/ncs205/graded/}}}.
# Grades are entered to Brightspace.
# After grades are entered, the script will move graded labs ready to be returned to your directory within {{File{/opt/pub/ncs205/returned/}}}. You may download them from this directory to see entered grades and my annotations.
The directories {{File{/opt/pub/ncs205/queue/}}} and {{File{/opt/pub/ncs205/graded/}}} are staging directories in the workflow pipeline.  You can view the contents of these directories but cannot write to them.  Your access is only so you can have full visibility on where your labs reside in the workflow.

tl;dr: You upload new labs to {{File{/opt/pub/ncs205/submit/}}} and retrieve graded copies from within {{File{/opt/pub/ncs205/returned/}}}.


!! Extra Credit Labs

Extra material which was written for other courses or removed from this course is available for extra credit.  Extra credit material will be posted to the main page of the class website and their labs will be denoted with a letter in their number, for example Lab A1.  Submit these labs as you normally would with the letter portion of the lab number represented in uppercase.

Extra credit labs will be graded on the same 10-point scale as regular labs and will be tracked in their own group.  At the end of the semester, 10% of the extra credit group grade will be applied to your final course average.  This bonus cannot exceed one minor-level grade boost.  For example, a B may become a B+ but cannot become an A-.

Any instance of academic dishonesty will result in loss of eligibility of extra credit.
!![[Lab E1 - Bring Files VM online]]
Assigned [[Week E]]

Bring your files VM online:
* A new VM was added for you
* Assign it the 4th IP address in your range
* Add the hostname {{Monospaced{''files.//username//.ncs205.net''}}} to the file {{File{/etc/hostname}}}
* Add an A record to your DNS zone for this new VM
* Reboot the VM to ensure all network settings were properly applied
* Install the standard software packages
* Apply any outstanding updates
* Configure NTP to synchronize time against your core VM and ensure time is fully synchronized
* Apply the steps in [[Lab 62 - VM Lockdown - Secure your VMs]] to harden this VM.


!!! Verification Worksheet

Also complete and submit the [[Lab E1|labs/labE1.pdf]] verification worksheet.  This submitted worksheet will indicate your ~VMs are ready to be evaluated for this lab.
!![[Lab E2 - Logical Volume Manager]]
Assigned [[Week E]]

Complete the steps in the [[Lab E2 Instructions|labs/labE2-instructions.pdf]] PDF on your files VM to become familiar with the Linux logical volume manager.

Add additional filesystems to your core VM server
* See the last page in the [[Lab E Instructions|labs/labE-instructions.pdf]]
* Complete the [[Lab E2 Deliverable|labs/labE2.pdf]] and submit this PDF to {{File{/opt/pub/ncs205/submit/}}} on the class shell server

This lab will involve restarting your file server VM.  Be sure the necessary services are configured to start on boot and ~SELinux and firewalld are properly configured.
!![[Lab E3 - Storage Expansion]]
Assigned [[Week E]]

Some systems need additional storage beyond what was initially provisioned.  Here, we have a file server VM that was created with an additional disk.  We now need to make that additional disk available to the operating system for storing additional data.

Perform the following steps on your files VM.

!!! Observe available storage devices

The {{Command{lsblk}}} command is a quick way to visualize all storage devices available to a system.  Here, we can see that there are two unallocated drives - {{File{vdb}}} and {{File{vdc}}}.  We'll use {{File{vdb}}} for this lab and leave {{File{vdc}}} alone.
{{{
[root@files ~]# lsblk
NAME            MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sr0              11:0    1 1024M  0 rom
vda             252:0    0   16G  0 disk
├─vda1          252:1    0    1G  0 part /boot
└─vda2          252:2    0   15G  0 part
  ├─centos-root 253:0    0 13.4G  0 lvm  /
  └─centos-swap 253:1    0  1.6G  0 lvm  [SWAP]
vdb             252:16   0    2G  0 disk
vdc             252:32   0    2G  0 disk
}}}

!!! Create vdb1 Partition

It's generally preferred to create partitions on the drives instead of using the bare device.  Partitions are logical divisions of the physical disk that will then hold the filesystem.  Here, we're going to devote the entire disk to a single partition and a single filesystem.  Creating multiple partitions on a disk allows it to hold separate filesystems.  In some instances, physically dividing groups of files into separate filesystems is preferred.  One example is logs.  If you have a system, such as a webserver, that may generate a lot of logs, it's wise to store those logs on their own filesystem.  If everything is stored on the same filesystem, excessive logs could fill the disk and interfere with the system's ability to store new data.

Refer to the //Storage Layers// diagram.  We'll be following the path on the left from Storage Devices to Partitions to Filesystems.
[img[img/storage-layers.jpg]]

Duplicate this interaction with the {{Command{parted}}} command to create a new disk label and new partition.  The first {{Command{print}}} command shows the disk is currently bare.
{{{
[root@files ~]# parted /dev/vdb
GNU Parted 3.1
Using /dev/vdb
Welcome to GNU Parted! Type 'help' to view a list of commands.
(parted) print
Error: /dev/vdb: unrecognised disk label
Model: Virtio Block Device (virtblk)
Disk /dev/vdb: 2147MB
Sector size (logical/physical): 512B/512B
Partition Table: unknown
Disk Flags:
(parted)
}}}
{{{
(parted) mklabel gpt
(parted) mkpart
Partition name?  []? storage
File system type?  [ext2]? xfs
Start? 1
End? 100%
(parted) quit
Information: You may need to update /etc/fstab.

[root@files ~]#
}}}


Now run {{Command{ lsblk }}} to verify the new partition was created.  It's always wise to add verification steps as you proceed instead of just blindly assuming everything is working as it should.  If you compare this output to the one above, you'll see that the {{File{ vdb1 }}} partition has been created.
{{{
[root@files ~]# lsblk
NAME            MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sr0              11:0    1 1024M  0 rom
vda             252:0    0   16G  0 disk
├─vda1          252:1    0    1G  0 part /boot
└─vda2          252:2    0   15G  0 part
  ├─centos-root 253:0    0 13.4G  0 lvm  /
  └─centos-swap 253:1    0  1.6G  0 lvm  [SWAP]
vdb             252:16   0    2G  0 disk
└─vdb1          252:17   0    2G  0 part
vdc             252:32   0    2G  0 disk
}}}


!!! Create the filesystem

We can see from the {{Command{ lsblk }}} command that the new partition, {{File{vdb1}}}, has been successfully created.  Now we must put a filesystem on it.  Partitions are the physical divisions of a disk.  Filesystems are the data structures the operating system interacts with in order to store files.
{{{
[root@files ~]# mkfs.xfs /dev/vdb1
meta-data=/dev/vdb1              isize=512    agcount=4, agsize=130944 blks
         =                       sectsz=512   attr=2, projid32bit=1
         =                       crc=1        finobt=0, sparse=0
data     =                       bsize=4096   blocks=523776, imaxpct=25
         =                       sunit=0      swidth=0 blks
naming   =version 2              bsize=4096   ascii-ci=0 ftype=1
log      =internal log           bsize=4096   blocks=2560, version=2
         =                       sectsz=512   sunit=0 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0
}}}


!!! Create the mount point

A mount point is a representation of the filesystem that we can interact with.  On Windows systems, mount points are generally drive letters, like C:\ or D:\.  In the Unix/Linux world, everything is one big filesystem tree.  Linux mount points are directories on that tree.  We identify a directory to mount our new filesystem to, and then any interaction with that directory and all items within it will be directed to our new disk volume.  Here, we want to make our new disk available to the system at the directory {{File{ /opt/storage/ }}}.

We first need to ensure the new mount point exists:
{{{
[root@files ~]# mkdir /opt/storage
}}}


!!! Edit the filesystem table

The ''f''ile''s''ystem ''tab''le, {{File{/etc/fstab}}}, is the configuration file which specifies which disk volumes are mounted at system startup.  Add your new disk volume to the file so it is mounted on boot.

Here's a copy of my {{File{ /etc/fstab }}} file.  The last line is the one you need to copy to yours.  Each line contains:
* the physical volume, {{File{ /dev/vdb1 }}}
* the mount point, {{File{/opt/storage }}}
* the filesystem type, {{Monospaced{ xfs }}}
* any special mount options.  Here, just the {{Monospaced{ defaults }}}
* a binary value ({{Monospaced{0}}} or {{Monospaced{1}}}) to indicate whether the filesystem should be backed up.  This is largely deprecated.
* the order in which filesystem checks ({{Command{fsck}}} command) should be run.  A value of {{Monospaced{ 0 }}} disables these checks
{{{
[root@files ~]# cat /etc/fstab

#
# /etc/fstab
# Created by anaconda on Fri Mar 13 00:03:20 2020
#
# Accessible filesystems, by reference, are maintained under '/dev/disk'
# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info
#
/dev/mapper/centos-root /                       xfs     defaults        0 0
UUID=f68b9069-7271-48de-b968-00d62e825144 /boot                   xfs     defaults        0 0
/dev/mapper/centos-swap swap                    swap    defaults        0 0

/dev/vdb1               /opt/storage      xfs   defaults        0 0
}}}


!!! Mount the new filesystem

Changes to the {{File{/etc/fstab}}} file should be tested and filesystems mounted with the {{Command{ mount -a }}} command.  This will catch any errors in the file.  If there is an error mounting a filesystem on system startup, the OS will not fully load and your only option will be to fix the problem on console.  This can be a nasty surprise if you don't have easy access to console.

The {{Command{df -h}}} command adds a verification step that the filesystem is fully mounted and accessible.  The old proverb //trust, but verify// must apply to everything you do.
{{{
[root@files ~]# mount -a

[root@files ~]# df -h
Filesystem               Size  Used Avail Use% Mounted on
devtmpfs                 232M     0  232M   0% /dev
tmpfs                    244M  120K  244M   1% /dev/shm
tmpfs                    244M   29M  215M  12% /run
tmpfs                    244M     0  244M   0% /sys/fs/cgroup
/dev/mapper/centos-root   14G  2.4G   12G  18% /
/dev/vda1               1014M  228M  787M  23% /boot
tmpfs                     49M     0   49M   0% /run/user/7289
/dev/vdb1                2.0G   33M  2.0G   2% /opt/storage
}}}


!!! Verification worksheet

You should now have successfully added a new storage volume to your files server VM.  Complete and submit the [[Lab E3|labs/labE3.pdf]] verification worksheet when you are ready for review.
!![[Lab E4 - Monitoring disk usage with Nagios]]
Assigned [[Week E]]

Naemon infrastructure monitoring checks are performed through a series of plugins.  Naemon plugins are shell scripts or binary executables which perform their specific check and return an exit status and their results to the Naemon service.

Currently the majority of our naemon checks are for services running on our ~VMs.  The Naemon plugins interact with these services in a normal fashion and records its findings for display on the web interface.  Occasionally it is necessary to examine a system internally, beyond the reach of our standard external plugins.  The Naemon Remote Plugin Executor (NRPE) is a service which can be run on a system being monitored to provide a central Naemon server the ability to execute internal plugins and examine the target system from the inside.  The Naemon server will communicate with NRPE and request it run a local plugin residing on the target system.  NRPE will then return the results of that plugin to the Naemon monitoring service.  

Certain checks, such as those examining the amount of free space available on a system, can only be performed via NRPE and a plugin residing on the system being checked.  Monitoring the amount of free space in your log and home directory partitions is of special importance in order to prevent log information loss or user data loss.

Install NRPE and the necessary naemon plugin on each of your production ~VMs.  Configure NRPE to grant our Naemon server permission to access it and configure the disk check plugin to monitor the {{File{/var/log}}} and {{File{/home}}} filesystems.


!!! 1. Install the EPEL repository

Install the Extra Packages for Enterprise Linux (EPEL) yum repository.  The extra Nagios utilities and plugins are not available via the standard yum repo that ships with ~CentOS.

{{Command{yum install epel-release}}}


!!! 2. Install the nrpe service and nagios disk check plugin via yum.  Note:  Nagios & Naemon are two different monitoring tools that are comparable with each other and share the same plugins.

{{Command{yum install nrpe nagios-plugins-disk}}}


!!! 3. Edit the {{File{/etc/nagios/nrpe.cfg}}} config file:

Search for the ''allowed_hosts'' configuration directive and grant the local system, the class shell server, and the Naemon server permission to access it:
{{{
allowed_hosts=127.0.0.1,192.168.12.10,192.168.12.15
}}}

Add these three lines to the end of the command definitions (towards the bottom), creating disk check commands for {{File{/}}}, {{File{/var/log/}}}, and {{File{/home/}}}.  The Naemon server will execute these configured commands via the NRPE service.
{{{
command[check_disk_root]=/usr/lib64/nagios/plugins/check_disk -w 15% -c 8% -p /
command[check_disk_log]=/usr/lib64/nagios/plugins/check_disk -w 15% -c 8% -p /var/log
command[check_disk_home]=/usr/lib64/nagios/plugins/check_disk -w 15% -c 8% -p /home
}}}


!!! 4. Start the NRPE service now and on boot:

Start on boot: {{Command{ systemctl enable nrpe }}}
Start now:  {{Command{ systemctl start nrpe }}}
Verify: {{Command{ systemctl status nrpe }}}


!!! 5. Install the NRPE Nagios plugin

This is the plugin used by the Naemon server to call remote commands via NRPE.  Normally this plugin is only installed on the Naemon server.  We're installing it on our VMs for testing.

{{Command{ yum install nagios-plugins-nrpe }}}


!!! 6.  Test

Execute the plugin to test your NRPE instance.  The string returned is what would be reported back to Naemon and what will be displayed on the Naemon web interface.

{{Command{ /usr/lib64/nagios/plugins/check_nrpe  -H 127.0.0.1 -c check_disk_root }}}
{{Command{ /usr/lib64/nagios/plugins/check_nrpe  -H 127.0.0.1 -c check_disk_log }}}
{{Command{ /usr/lib64/nagios/plugins/check_nrpe  -H 127.0.0.1 -c check_disk_home }}}


!!! 7.  Adjust the firewall

You should be running a host-based firewall on your ~VMs.  Don't forget to update the ~FirewallD lab worksheet and adjust the firewall on your ~VMs to allow traffic to the NRPE service.  This service listens on TCP/5666.


!!! Verification Worksheet

Also complete and submit the [[Lab E4|labs/labE4.pdf]] verification worksheet.  This submitted worksheet will indicate your ~VMs are ready to be evaluated for this lab.
!! [[Lab H - Host-based Firewalls]]
Assigned [[Week H]]

!!! Implement a host-based firewall on your ~VMs

* Complete the [[Lab H|labs/labH.pdf]] worksheet and upload it to the class shell server
* Take note of the state of your services in Naemon.  It's always a good idea to have a known baseline of what things look like before making network changes.  Taking a screenshot may be helpful.
** If something is down after you make changes and you don't know what things looked like before, you won't know if your change was the reason for the outage.
* Enable the firewalld service so it starts on boot and start the service now
* Request a scan of your services on Naemon.  Take note of any changes to the alarms.
* Add the firewall rules you identified in the Lab H PDF.
* Recheck your services in Naemon and ensure all new alarms have cleared.
!![[Lab X - Containerization with Docker]]
Assigned [[Week X, Part 1]]

Defining infrastructure as code is another advantage of Docker containerization.  With this concept, the services we need to run on our server are all clearly defined in a single configuration file.  Once defined, these services can be brought online with just a few commands.  Defining our infrastructure in this fashion makes it much easier to document our server configuration, track our infrastructure changes over time, and replicate our configuration elsewhere.  

Recall the work we previously did to install the database and wiki.  Several commands were executed to install and start the services, configure Apache, create the database accounts, download & unpack the wiki, and bring everything online for use.  Now, a single infrastructure definition file will contain our requirements to run the wiki and database in separate Docker containers and make them available for use.

Complete the following steps on your www VM:


!!! 1. Create our working directory

Create the directory {{File{/opt/docker/}}} to use as the storage location for our files.  All work will be performed within this directory.


!!! 2. Define the infrastructure

A {{File{docker-compose.yml}}} file contains the definition for our services and is another way to launch and maintain Docker containers.  This file also fully documents the requirements and changes being made to support these services.  Complex {{File{docker-compose.yml}}} files can be used to define an entire server running several different services within containers.  Should we need to reinstall or rebuild the underlying OS, our containers and all dependencies are fully documented.  It would just be a matter of copying our {{File{docker-compose.yml}}} file and all required data volumes to the new system in order to quickly replicate it.

Create the {{File{/opt/docker/docker-compose.yml}}} file containing the following contents.  Adjust the file to set your own database username and password.

{{{
[root@www docker]# cat docker-compose.yml
version: '3'

networks:
  wiki_network:

services:
  mediawiki:
    image: mediawiki
    container_name: mediawiki
    ports:
      - 8080:80
    links:
      - mariadb
    networks:
      - wiki_network
    volumes:
      - /var/www/html/images
      # After initial setup, download LocalSettings.php to the wiki directory, remove the below comment, and restart the container
      # - ./wiki/LocalSettings.php:/var/www/html/LocalSettings.php
    restart: always

  mariadb:
    image: mariadb
    container_name: mariadb
    networks:
      - wiki_network
    volumes:
      - ./mariadb:/var/lib/mysql
    environment:
      # See https://phabricator.wikimedia.org/source/mediawiki/browse/master/includes/DefaultSettings.php for other possible settings
      MYSQL_DATABASE: mediawiki
      MYSQL_USER: wikiuser
      MYSQL_PASSWORD: example_password
      MYSQL_RANDOM_ROOT_PASSWORD: 'yes'
    restart: always
}}}

{{Warning{''Warning:'' Indentation within the docker-compose file is very specific.  Like Python, indentation is used to establish nesting of configuration items.  The indentation levels of this file must be properly preserved.}}}


!!! 3. Create database directory

As you can see from the definition above, the database files are stored in a local directory easily accessible to us.  This also makes it easier if we wanted to back up just these database files.

Create the directory {{File{/opt/docker/mariadb/}}}.  Any storage must be saved outside of the container, either to a local directory similar to this one or to a Docker volume.  Storing any files which would change, like database files, outside of the container ensures that data is available if the container was recreated.  If the database files were stored within the container, all data would then be lost if the container was rebuilt or upgraded to a newer version.

Local storage is defined with the volumes tag, as seen in the example above.


!!! 4. Start containers 

Run the command {{Command{docker-compose up -d}}} to bring our containers online.  Be sure you're currently in the {{File{/opt/docker/}}} directory.


!!! 5. Validate images

The containers should be downloaded from the Docker hub and should be visible:

{{{
[root@www docker]# docker images
REPOSITORY   TAG       IMAGE ID       CREATED       SIZE
mariadb      latest    1de5905a6164   4 days ago    410MB
mediawiki    latest    c8ce33ea98e9   2 weeks ago   809MB
}}}


!!! 6. Validate running containers

We should also see the new containers fully online.

{{{
[root@www docker]# docker-compose ps
  Name                 Command               State                                                Ports
-----------------------------------------------------------------------------------------------------------------------------------------------------
mariadb     docker-entrypoint.sh mariadbd    Up      3306/tcp
mediawiki   docker-php-entrypoint apac ...   Up      0.0.0.0:8080->80/tcp,:::8080->80/tcp
}}}


!!! 7. Observe database files:

Run the command {{Command{ls -l mariadb}}} to view the database files.


!!! 8. Obtain the generated root password

Our database service is configured to generate a unique root password the first time the container starts up.  Check the Docker logs to find the new root password and save this value in case we need it later.

This command demonstrates finding my database root password.  Yours will be different:

{{{
[root@www docker]# docker logs mariadb 2>&1 | grep ROOT
2022-05-01 01:45:43+00:00 [Note] [Entrypoint]: GENERATED ROOT PASSWORD: izp#st9p7`a_+Y<@:xIc&v=lEF`NG~%G
}}}


!!! 9. Validate database connection

Before proceeding, it would be prudent to ensure you are able to access the ~MariaDB database running inside the container.  Execute the following command to log in to the database.
* {{Monospaced{ ''-u'' }}} refers to the value set in the {{Monospaced{~MYSQL_USER}}} environment variable in your {{File{docker-compose.yml}}} file.
* The final argument, {{Monospaced{//mediawiki//}}}, is the name of the database you specified in the {{Monospaced{~MYSQL_DATABASE}}} variable.
* The password you are prompted to enter will be the value set in the {{Monospaced{~MYSQL_PASSWORD}}} variable.

{{{
[root@www docker]# docker exec -ti mariadb mysql -u wikiuser -p mediawiki
Enter password:
Welcome to the MariaDB monitor.  Commands end with ; or \g.
Your MariaDB connection id is 3
Server version: 10.7.3-MariaDB-1:10.7.3+maria~focal mariadb.org binary distribution

Copyright (c) 2000, 2018, Oracle, MariaDB Corporation Ab and others.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

MariaDB [mediawiki]> show databases;
+--------------------+
| Database           |
+--------------------+
| information_schema |
| mediawiki          |
+--------------------+
2 rows in set (0.002 sec)

MariaDB [mediawiki]> exit;
}}}


!!! 10. Configure wiki

Your services should now be online with a copy of the ~MediaWiki and required database running in containers.  Access your wiki and configure it, similar to how we did it last time.  The URL {{Monospaced{htt&#112;:&#47;/192.168.12.''X'':8080}}} will load your new containerized wiki.  Replace the ''X'' with the last octet for your web server VM.

Once the site is loaded, go through the wiki configuration as you did last time and generate the {{File{~LocalSettings.php}}} file.  
* Be sure to set {{{Database Host}}} to {{{mariadb}}} when prompted for the database information


!!! 11. Add {{File{~LocalSettings.php}}} configuration file

a. Create the directory {{File{/opt/docker/wiki/}}} on your www VM and upload your new {{File{~LocalSettings.php}}} file to it.
b. Uncomment the following line in your new {{File{docker-compose.yml}}} file:

This will make the wiki configuration file available within the container

{{{
      # - ./wiki/LocalSettings.php:/var/www/html/LocalSettings.php
}}}

c. Restart the wiki container to activate the volume change.  This command must be executed from within the {{File{/opt/docker/}}} directory.

{{Command{docker-compose up -d mediawiki}}}


!!! 12. Validate your new, containerized wiki 

Access your wiki again through the tunnel to ensure everything is set up correctly and it is online for use.


!!! 13. Submit the deliverable

* Complete [[Lab 55|labs/lab55.pdf]] and submit this PDF to {{File{/opt/pub/ci233/submit/}}} on the class shell server
** This lab is due Saturday
| !Character | !Shortcut | !Most Useful |
| ~CTRL-C |Send interrupt signal to a running command (abort)| * |
|~|Clear entered command line text|
| ~CTRL-A |Move cursor to beginning of command line| * |
| ~CTRL-E |Move cursor to end of command line| * |
| ~CTRL-L |Clear Screen; move cursor to top of screen| * |
| ~ALT-B |Move one word backward on command line|
| ~ALT-F |Move one word forward on command line|
| ~CTRL-U |Erase line to left|
| ~CTRL-K |Erase line to the right|
| ~CTRL-W |Erase a word to left on command line| * |
| ~ALT-D |Erase a word to right on command line|
| ~CTRL-Y |Paste previously erased text|
| ~CTRL-D |Send EOF signal, ending input| * |
|~|Erase character under cursor| * |
|~|Log out (when no other text is on the command line)| * |
| ~Shift-INS |Paste clipboard at cursor| * |
| ~Shift-PgUp |Scroll window up|
| ~Shift-PgDn |Scroll window down|
| Tab |Auto-complete command or file name| * |
| Up Arrow |Previous Command| * |
| Down Arrow |Next command| * |
| Page Up |Previous command search| * |
| Page Down |Next command search| * |

{{Note{''Note:'' The above key sequences were listed with uppercase letters for clarity.  It is not necessary to also press the shift key.}}}

!! Tab Completion

The tab key will auto-complete commands or file names, pausing when it reaches a decision point.  

If I type the letters ''ad'' on the command line and press tab, the shell will autocomplete it to the string ''add'' before it reaches a decision point and cannot proceed without input.  If I press tab twice it will then show me the options I have to complete the command:
<<<
[root@shell data]# add
addgnupghome  addpart       addr2line     adduser
<<<

If I press the letter p and then tab again, the shell will know which command I'm looking for and auto-complete the command ''addpart''

The same auto-completion can be used for files.  The path to the networking configuration file on Linux systems is rather long.  Try this scenario on the class shell server:
* Type {{Command{cat /etc/sysco}}} and press ''tab''.  The shell should autocomplete that to {{Command{cat /etc/sysconfig/}}}.
* We're at a decision point since there are many different ways we could proceed.  Type: {{Command{netw}}} and press tab.  The shell will autocomplete that to {{Command{cat /etc/sysconfig/network}}}.
* Press the {{Command{-}}} key and press tab again.  The shell will autocomplete that to {{Command{cat /etc/sysconfig/network-scripts/}}}.
* Type {{Command{ifcfg-eth}}} and press tab twice.  We are presented with the available options.
* Type {{Command{0}}} and hit enter to view the network configuration file.

Using tab helped me identify the available files and reduced the amount of letters I needed to type to view the file.  It's slow at first, but once you get used to it, it greatly improves the speed and efficiency of using the shell and reduces the amount of information you have to remember.


!! Command recall

The page up and page down keys can be used to scroll through the recently used commands.  This isn't universal; the shell needs to be configured to support it, but it's supported by most systems out of the box.

If you have a long command string that wasn't used very recently, rather than press the up arrow several times to find it, you can enter the first few letters of that command and then ~Page-Up.  The shell will cycle through your recent commands which began with those letters.

For example, a few days ago I ran the command {{Command{fail2ban-client status sshd-root}}} to see how many systems were trying to break into the class shell server.  Rather than type out that entire command (or have to remember it), if I enter the first few letters {{Command{fai}}} and then press ~Page-Up, the shell will search backward in my command history and bring me right to it.  If I used the up arrow, I'd first have to scroll through the hundreds of commands I may have entered since then.


!! Copy/Paste

In putty and most other terminal emulators, highlighting text with the mouse will copy it to the clipboard.  Clicking the right mouse button will paste text from the clipboard into the terminal at the position of the cursor.  If you are connecting from a Linux host like Kali instead of Windows, clicking the middle mouse button or scroll wheel will paste text to the terminal.  ~Shift-Insert will also paste text from the clipboard into the terminal.

// //''Name:'' listTags plugin
// //''Version:'' 0.1.0
// //''Author:'' SteveRumsby

// //''Syntax:''
// //<< {{{listTags tag //sort// //prefix//}}} >>

// //''Description:''
// //Generate a list of tiddlers tagged with the given tag.
// //If both //sort// and //prefix// are omitted the list is sorted in increasing order of title, with one tiddler per line.
// //If //sort// is specified the list is sorted in increasing order of the given tiddler property. Possible properties are: title, modified, modifier.
// //If //prefix// is specified the given string is inserted before the tiddler title. The insertion happens before the text is wikified. This can be used to generated bulleted or numbered lists.

// //''Examples:''
// //<< {{{listTags usage}}} >> - generate a plain list of all tiddlers tagged with tag //usage//, sorted by title
// //<< {{{listTags usage modified}}} >> - the same list, with most recently modified tiddlers last
// //<< {{{listTags usage title #}}} >> - generate a numbered list of tiddlers tagged with //usage//, sorted by title

// //''Code section:''
// Register this plugin's version metadata in the standard
// version.extensions table so TiddlyWiki (and other plugins) can
// detect that the listTags extension is installed.
version.extensions.listTags = {major: 0, minor: 1, revision: 0, date: new Date(2005, 6,16)};

// Macro registration object for <<listTags>>; the handler function is
// attached separately below.
// NOTE(review): the 'text' property appears unused by the handler —
// presumably a leftover from a macro template; confirm before removing.
config.macros.listTags = {
text: "Hello"
};

// Macro handler for <<listTags tag sort prefix>>.
// params[0] is the tag to match, params[1] the optional sort field
// (passed straight through to store.getTaggedTiddlers), and params[2]
// an optional prefix inserted before each title — e.g. "*" or "#" to
// produce bulleted or numbered lists once the output is wikified.
config.macros.listTags.handler = function(place, macroName, params)
{
    var matches = store.getTaggedTiddlers(params[0], params[1]);
    var prefix = params[2] ? params[2] + " " : "";
    var lines = [];
    for (var i = 0; i < matches.length; i++) {
        lines.push(prefix + "[[" + matches[i].title + "]]\n");
    }
    // Render the accumulated wikitext into the macro's output node.
    wikify(lines.join(""), place, null, null);
}
&nbsp; <<defaultHome>>  [[Notebook]]  [[Virtual Machines]]  [[Outline]]  [[Calendar]]

<html>
<center>
  <video id="my-video" class="video-js" controls preload="auto" width="1572" height="724" poster="" data-setup="{}">
    <source src="video/naemon.mp4" type='video/mp4'>
    <p class="vjs-no-js">
      To view this video please enable JavaScript, and consider upgrading to a web browser that
      <a href="http://videojs.com/html5-video-support/" target="_blank">supports HTML5 video</a>
    </p>
  </video>

  <script src="https://vjs.zencdn.net/7.8.2/video.min.js"></script>
</center>
</html>
/***
''NestedSlidersPlugin for TiddlyWiki version 1.2.x and 2.0''
^^author: Eric Shulman
source: http://www.TiddlyTools.com/#NestedSlidersPlugin
license: [[Creative Commons Attribution-ShareAlike 2.5 License|http://creativecommons.org/licenses/by-sa/2.5/]]^^

Quickly make any tiddler content into an expandable 'slider' panel, without needing to create a separate tiddler to contain the slider content.  Optional syntax allows ''default to open'', ''custom button label/tooltip'' and ''automatic blockquote formatting.''

You can also 'nest' these sliders as deep as you like (see complex nesting example below), so that expandable 'tree-like' hierarchical displays can be created.  This is most useful when converting existing in-line text content to create in-line annotations, footnotes, context-sensitive help, or other subordinate information displays.

For more details, please click on a section headline below:
++++!!!!![Configuration]>
Debugging messages for 'lazy sliders' deferred rendering:
<<option chkDebugLazySliderDefer>> show debugging alert when deferring slider rendering
<<option chkDebugLazySliderRender>> show debugging alert when deferred slider is actually rendered
===
++++!!!!![Usage]>
When installed, this plugin adds new wiki syntax for embedding 'slider' panels directly into tiddler content.  Use {{{+++}}} and {{{===}}} to delimit the slider content.  Additional optional syntax elements let you specify
*default to open
*cookiename
*heading level
*floater (with optional CSS width value)
*mouse auto rollover
*custom label/tooltip/accesskey
*automatic blockquote
*deferred rendering
The complete syntax, using all options, is:
//{{{
++++(cookiename)!!!!!^width^*[label=key|tooltip]>...
content goes here
===
//}}}
where:
* {{{+++}}} (or {{{++++}}}) and {{{===}}}^^
marks the start and end of the slider definition, respectively.  When the extra {{{+}}} is used, the slider will be open when initially displayed.^^
* {{{(cookiename)}}}^^
saves the slider opened/closed state, and restores this state whenever the slider is re-rendered.^^
* {{{!}}} through {{{!!!!!}}}^^
displays the slider label using a formatted headline (Hn) style instead of a button/link style^^
* {{{^width^}}} (or just {{{^}}})^^
makes the slider 'float' on top of other content rather than shifting that content downward.  'width' must be a valid CSS value (e.g., "30em", "180px", "50%", etc.).  If omitted, the default width is "auto" (i.e., fit to content)^^
* {{{*}}}^^
automatically opens/closes slider on "rollover" as well as when clicked^^
* {{{[label=key|tooltip]}}}^^
uses custom label/tooltip/accesskey.  {{{=key}}} and {{{|tooltip}}} are optional.  'key' is must be a ''single letter only''.  Default labels/tootips are: ">" (more) and "<" (less), with no default access key assignment.^^
* {{{">"}}} //(without the quotes)//^^
automatically adds blockquote formatting to slider content^^
* {{{"..."}}} //(without the quotes)//^^
defers rendering of closed sliders until the first time they are opened.  //Note: deferred rendering may produce unexpected results in some cases.  Use with care.//^^

//Note: to make slider definitions easier to read and recognize when editing a tiddler, newlines immediately following the {{{+++}}} 'start slider' or preceding the {{{===}}} 'end slider' sequence are automatically supressed so that excess whitespace is eliminated from the output.//
===
++++!!!!![Examples]>
simple in-line slider: 
{{{
+++
   content
===
}}}
+++
   content
===
----
use a custom label and tooltip: 
{{{
+++[label|tooltip]
   content
===
}}}
+++[label|tooltip]
   content
===
----
content automatically blockquoted: 
{{{
+++>
   content
===
}}}
+++>
   content
===
----
all options combined //(default open, cookie, heading, sized floater, rollover, label/tooltip/key, blockquoted, deferred)//
{{{
++++(testcookie)!!!^30em^*[label=Z|click or press Alt-Z to open]>...
   content
===
}}}
++++(testcookie)!!!^30em^*[label=Z|click or press Alt-Z to open]>...
   content
===
----
complex nesting example:
{{{
+++^[get info...=I|click for information or press Alt-I]
   put some general information here, plus a floating slider with more specific info:
   +++^10em^[view details...|click for details]
      put some detail here, which could include a rollover with a +++^25em^*[glossary definition]explaining technical terms===
   ===
===
}}}
+++^[get info...=I|click for information or press Alt-I]
   put some general information here, plus a floating slider with more specific info:
   +++^10em^[view details...|click for details]
      put some detail here, which could include a rollover with a +++^25em^*[glossary definition]explaining technical terms===
   ===
===
----
nested floaters
>menu: <<tiddler NestedSlidersExample>>
(see [[NestedSlidersExample]] for definition)
----
===
!!!!!Installation
<<<
import (or copy/paste) the following tiddlers into your document:
''NestedSlidersPlugin'' (tagged with <<tag systemConfig>>)
<<<
!!!!!Revision History
<<<
''2006.05.11 - 1.9.0'' added optional '^width^' syntax for floating sliders and '=key' syntax for setting an access key on a slider label
''2006.05.09 - 1.8.0'' in onClickNestedSlider(), when showing panel, set focus to first child input/textarea/select element
''2006.04.24 - 1.7.8'' in adjustSliderPos(), if floating panel is contained inside another floating panel, subtract offset of containing panel to find correct position
''2006.02.16 - 1.7.7'' corrected deferred rendering to account for use-case where show/hide state is tracked in a cookie
''2006.02.15 - 1.7.6'' in adjustSliderPos(), ensure that floating panel is positioned completely within the browser window (i.e., does not go beyond the right edge of the browser window)
''2006.02.04 - 1.7.5'' add 'var' to unintended global variable declarations to avoid FireFox 1.5.0.1 crash bug when assigning to globals
''2006.01.18 - 1.7.4'' only define adjustSliderPos() function if it has not already been provided by another plugin.  This lets other plugins 'hijack' the function even when they are loaded first.
''2006.01.16 - 1.7.3'' added adjustSliderPos(place,btn,panel,panelClass) function to permit specialized logic for placement of floating panels.  While it provides improved placement for many uses of floating panels, it exhibits a relative offset positioning error when used within *nested* floating panels.  Short-term workaround is to only adjust the position for 'top-level' floaters.
''2006.01.16 - 1.7.2'' added button property to slider panel elements so that slider panel can tell which button it belongs to.  Also, re-activated and corrected animation handling so that nested sliders aren't clipped by hijacking Slider.prototype.stop so that "overflow:hidden" can be reset to "overflow:visible" after animation ends
''2006.01.14 - 1.7.1'' added optional "^" syntax for floating panels.  Defines new CSS class, ".floatingPanel", as an alternative for standard in-line ".sliderPanel" styles.
''2006.01.14 - 1.7.0'' added optional "*" syntax for rollover handling to show/hide slider without requiring a click (Based on a suggestion by tw4efl)
''2006.01.03 - 1.6.2'' When using optional "!" heading style, instead of creating a clickable "Hn" element, create an "A" element inside the "Hn" element.  (allows click-through in SlideShowPlugin, which captures nearly all click events, except for hyperlinks)
''2005.12.15 - 1.6.1'' added optional "..." syntax to invoke deferred ('lazy') rendering for initially hidden sliders
removed checkbox option for 'global' application of lazy sliders
''2005.11.25 - 1.6.0'' added optional handling for 'lazy sliders' (deferred rendering for initially hidden sliders)
''2005.11.21 - 1.5.1'' revised regular expressions: if present, a single newline //preceding// and/or //following// a slider definition will be suppressed so start/end syntax can be placed on separate lines in the tiddler 'source' for improved readability.  Similarly, any whitespace (newlines, tabs, spaces, etc.) trailing the 'start slider' syntax or preceding the 'end slider' syntax is also suppressed.
''2005.11.20 - 1.5.0'' added (cookiename) syntax for optional tracking and restoring of slider open/close state
''2005.11.11 - 1.4.0'' added !!!!! syntax to render slider label as a header (Hn) style instead of a button/link style
''2005.11.07 - 1.3.0'' removed alternative syntax {{{(((}}} and {{{)))}}} (so they can be used by other
formatting extensions) and simplified/improved regular expressions to trim multiple excess newlines
''2005.11.05 - 1.2.1'' changed name to NestedSlidersPlugin
more documentation
''2005.11.04 - 1.2.0'' added alternative character-mode syntax {{{(((}}} and {{{)))}}}
tweaked "eat newlines" logic for line-mode {{{+++}}} and {{{===}}} syntax
''2005.11.03 - 1.1.1'' fixed toggling of default tooltips ("more..." and "less...") when a non-default button label is used
code cleanup, added documentation
''2005.11.03 - 1.1.0'' changed delimiter syntax from {{{(((}}} and {{{)))}}} to {{{+++}}} and {{{===}}}
changed name to EasySlidersPlugin
''2005.11.03 - 1.0.0'' initial public release
<<<
!!!!!Credits
<<<
This feature was implemented by EricShulman from [[ELS Design Studios|http://www.elsdesign.com]] with initial research and suggestions from RodneyGomes, GeoffSlocock, and PaulPetterson.
<<<
!!!!!Code
***/
//{{{
// plugin version metadata for NestedSlidersPlugin (v1.9.0)
// NOTE(review): JS Date months are 0-based, so new Date(2006,5,11) is
// 11 Jun 2006, while the revision history above says 2006.05.11 -- confirm intent
version.extensions.nestedSliders = {major: 1, minor: 9, revision: 0, date: new Date(2006,5,11)};
//}}}

//{{{
// options for deferred rendering of sliders that are not initially displayed
// (both default to false; when enabled they alert() a trace of defer/render activity)
if (config.options.chkDebugLazySliderDefer==undefined) config.options.chkDebugLazySliderDefer=false;
if (config.options.chkDebugLazySliderRender==undefined) config.options.chkDebugLazySliderRender=false;

// default styles for 'floating' class
// NOTE(review): "floatingPanelStylesheet" is presumably a stylesheet element id
// used by the core setStylesheet() helper so the rule can be replaced later -- confirm
setStylesheet(".floatingPanel { position:absolute; z-index:10; padding:0.5em; margin:0em; \
	background-color:#eee; color:#000; border:1px solid #000; text-align:left; }","floatingPanelStylesheet");
//}}}

//{{{
// TiddlyWiki formatter for the +++ ... === "nested slider" wiki syntax.
// 'match' fires on the "+++" start sequence; 'lookahead' re-parses it with all
// optional modifiers; 'terminator' consumes the closing "===" sequence.
config.formatters.push( {
	name: "nestedSliders",
	match: "\\n?\\+{3}",
	terminator: "\\s*\\={3}\\n?",
	// lookahead capture groups (as used by the handler below):
	//   [1] extra '+'   -> slider is initially open
	//   [2] (cookie)    -> persist open/closed state in config.options
	//   [3] !!!...      -> render label as an Hn heading (n = number of '!')
	//   [4] ^ or ^w^    -> floating panel, with optional CSS width 'w'
	//   [5] *           -> also open/close on mouseover ("rollover")
	//   [6] [label]     -> custom label, optionally [label=key|tooltip]
	//   [7] >           -> wrap panel content in a blockquote
	//   [8] ...         -> defer rendering until first opened
	lookahead: "\\n?\\+{3}(\\+)?(\\([^\\)]*\\))?(\\!*)?(\\^(?:[^\\^\\*\\[\\>]*\\^)?)?(\\*)?(\\[[^\\]]*\\])?(\\>)?(\\.\\.\\.)?\\s*",
	handler: function(w)
		{
			// re-match at the current wikifier position to pick up the modifier groups
			var lookaheadRegExp = new RegExp(this.lookahead,"mg");
			lookaheadRegExp.lastIndex = w.matchStart;
			var lookaheadMatch = lookaheadRegExp.exec(w.source)
			if(lookaheadMatch && lookaheadMatch.index == w.matchStart)
			{
				// location for rendering button and panel
				var place=w.output;

				// default to closed, no cookie, no accesskey
				var show="none"; var title=">"; var tooltip="show"; var cookie=""; var key="";

				// extra "+", default to open
				if (lookaheadMatch[1])
					{ show="block"; title="<"; tooltip="hide"; }

				// cookie, use saved open/closed state
				if (lookaheadMatch[2]) {
					// strip the surrounding parentheses, prefix with "chkSlider"
					cookie=lookaheadMatch[2].trim().slice(1,-1);
					cookie="chkSlider"+cookie;
					if (config.options[cookie]==undefined)
						{ config.options[cookie] = (show=="block") }
					if (config.options[cookie])
						{ show="block"; title="<"; tooltip="hide"; }
					else
						{ show="none"; title=">"; tooltip="show"; }
				}

				// parse custom label/tooltip/accesskey: [label=X|tooltip]
				if (lookaheadMatch[6]) {
					title = lookaheadMatch[6].trim().slice(1,-1);
					var pos=title.indexOf("|");
					if (pos!=-1) { tooltip = title.substr(pos+1,title.length); title=title.substr(0,pos); }
					// a trailing "=X" on the label sets single-letter access key X
					if (title.substr(title.length-2,1)=="=") { key=title.substr(title.length-1,1); title=title.slice(0,-2); }
					if (pos==-1) tooltip += " "+title; // default tooltip: "show/hide <title>"
				}

				// create the button
				if (lookaheadMatch[3]) { // use "Hn" header format instead of button/link
					// clamp heading level at H6; the inner <a> keeps the label clickable
					var lvl=(lookaheadMatch[3].length>6)?6:lookaheadMatch[3].length;
					var btn = createTiddlyElement(createTiddlyElement(place,"h"+lvl,null,null,null),"a",null,null,title);
					btn.onclick=onClickNestedSlider;
					btn.setAttribute("href","javascript:;");
					btn.setAttribute("title",tooltip);
				}
				else
					var btn = createTiddlyButton(place,title,tooltip,onClickNestedSlider);
				btn.sliderCookie = cookie; // save the cookiename (if any) in the button object
				btn.keyparam=key; // save the access key letter ("" if none)
				if (key.length) {
					btn.setAttribute("accessKey",key); // init access key
					btn.onfocus=function(){this.setAttribute("accessKey",this.keyparam);}; // **reclaim** access key on focus
				}

				// "non-click" MouseOver open/close slider
				if (lookaheadMatch[5]) btn.onmouseover=onClickNestedSlider;

				// create slider panel
				var panelClass=lookaheadMatch[4]?"floatingPanel":"sliderPanel";
				var panel=createTiddlyElement(place,"div",null,panelClass,null);
				panel.style.display = show;
				if (lookaheadMatch[4] && lookaheadMatch[4].length>2) panel.style.width=lookaheadMatch[4].slice(1,-1); // custom width
				panel.button = btn; // so the slider panel know which button it belongs to
				btn.sliderPanel=panel;

				// render slider (or defer until shown) 
				w.nextMatch = lookaheadMatch.index + lookaheadMatch[0].length;
				if ((show=="block")||!lookaheadMatch[8]) {
					// render now if panel is supposed to be shown or NOT deferred rendering
					w.subWikify(lookaheadMatch[7]?createTiddlyElement(panel,"blockquote"):panel,this.terminator);
					// align slider/floater position with button
					adjustSliderPos(place,btn,panel,panelClass);
				}
				else {
					// deferred: stash the raw wiki text on the panel element; it is
					// wikified later by onClickNestedSlider the first time it is opened
					var src = w.source.substr(w.nextMatch);
					var endpos=findMatchingDelimiter(src,"+++","===");
					panel.setAttribute("raw",src.substr(0,endpos));
					panel.setAttribute("blockquote",lookaheadMatch[7]?"true":"false");
					panel.setAttribute("rendered","false");
					// skip past the stored content plus its 3-char "===" terminator
					w.nextMatch += endpos+3;
					if (w.source.substr(w.nextMatch,1)=="\n") w.nextMatch++;
					if (config.options.chkDebugLazySliderDefer) alert("deferred '"+title+"':\n\n"+panel.getAttribute("raw"));
				}
			}
		}
	}
)

// Find the offset in 'src' of the 'endtext' delimiter that matches the
// (already consumed) opening delimiter, skipping over nested start/end pairs.
// Returns src.length when no matching end delimiter exists.
// Fix: the nested-start scan previously used substring(startpos,endpos-1),
// which excluded the character at endpos-1 and therefore missed a nested
// 'starttext' whose last character sat immediately before the end delimiter
// (e.g. "ab+++===x===").  A start token can never overlap the end token, so
// scanning up to (but not including) endpos is both safe and correct.
// TBD: ignore 'quoted' delimiters (e.g., "{{{+++foo===}}}" isn't really a slider)
function findMatchingDelimiter(src,starttext,endtext) {
	var startpos = 0;
	var endpos = src.indexOf(endtext);
	// check for nested delimiters
	while (src.substring(startpos,endpos).indexOf(starttext)!=-1) {
		// count number of nested 'starts'
		var startcount=0;
		var temp = src.substring(startpos,endpos);
		var pos=temp.indexOf(starttext);
		while (pos!=-1)  { startcount++; pos=temp.indexOf(starttext,pos+starttext.length); }
		// set up to check for additional 'starts' after adjusting endpos
		startpos=endpos+endtext.length;
		// find endpos for corresponding number of matching 'ends'
		while (startcount && endpos!=-1) {
			endpos = src.indexOf(endtext,endpos+endtext.length);
			startcount--;
		}
	}
	return (endpos==-1)?src.length:endpos;
}
//}}}

//{{{
// Click (and optionally mouseover) handler for slider buttons.  Toggles the
// associated panel: swaps default labels/tooltips, renders deferred content on
// first open, focuses the first form control, and persists open/closed state
// when the slider was defined with a (cookie).
window.onClickNestedSlider=function(e)
{
	if (!e) var e = window.event; // old-IE event model
	var theTarget = resolveTarget(e);
	var theLabel = theTarget.firstChild.data;
	var theSlider = theTarget.sliderPanel
	var isOpen = theSlider.style.display!="none";
	// if using default button labels, toggle labels
	if (theLabel==">") theTarget.firstChild.data = "<";
	else if (theLabel=="<") theTarget.firstChild.data = ">";
	// if using default tooltips, toggle tooltips
	if (theTarget.getAttribute("title")=="show")
		theTarget.setAttribute("title","hide");
	else if (theTarget.getAttribute("title")=="hide")
		theTarget.setAttribute("title","show");
	// custom-label variant: toggle "show <label>" <-> "hide <label>"
	if (theTarget.getAttribute("title")=="show "+theLabel)
		theTarget.setAttribute("title","hide "+theLabel);
	else if (theTarget.getAttribute("title")=="hide "+theLabel)
		theTarget.setAttribute("title","show "+theLabel);
	// deferred rendering (if needed): wikify the stashed "raw" source now
	if (theSlider.getAttribute("rendered")=="false") {
		if (config.options.chkDebugLazySliderRender)
			alert("rendering '"+theLabel+"':\n\n"+theSlider.getAttribute("raw"));
		var place=theSlider;
		if (theSlider.getAttribute("blockquote")=="true")
			place=createTiddlyElement(place,"blockquote");
		wikify(theSlider.getAttribute("raw"),place);
		theSlider.setAttribute("rendered","true");
	}
	// show/hide the slider (animated unless chkAnimate is off)
	if(config.options.chkAnimate)
		anim.startAnimating(new Slider(theSlider,!isOpen,e.shiftKey || e.altKey,"none"));
	else
		theSlider.style.display = isOpen ? "none" : "block";
	// if showing panel, set focus to first 'focus-able' element in panel
	if (theSlider.style.display!="none") {
		var ctrls=theSlider.getElementsByTagName("*");
		for (var c=0; c<ctrls.length; c++) {
			var t=ctrls[c].tagName.toLowerCase();
			if (t=="input" || t=="textarea" || t=="select")
				{ ctrls[c].focus(); break; }
		}
	}
	// persist new state ('this' is the button when invoked as its event handler)
	if (this.sliderCookie && this.sliderCookie.length)
		{ config.options[this.sliderCookie]=!isOpen; saveOptionCookie(this.sliderCookie); }
	// align slider/floater position with target button
	adjustSliderPos(theSlider.parentNode,theTarget,theSlider,theSlider.className);
	return false;
}

// hijack animation handler 'stop' handler so overflow is visible after animation has completed
// (per revision 1.7.2 above: the animation uses overflow:hidden, which would
// otherwise clip nested sliders/floating panels opened inside the panel)
Slider.prototype.coreStop = Slider.prototype.stop;
Slider.prototype.stop = function() { this.coreStop(); this.element.style.overflow = "visible"; }

// adjust panel position based on button position
// Defined only if no other plugin has already provided adjustSliderPos(), so
// plugins loaded earlier can 'hijack' it (see revision 1.7.4 above).  In-line
// sliderPanels need no adjustment; floatingPanels are placed just below their
// button and pulled back inside the window's right edge if necessary.
if (window.adjustSliderPos==undefined) window.adjustSliderPos=function(place,btn,panel,panelClass) {
	if (panelClass=="floatingPanel") {
		var left=0;
		var top=btn.offsetHeight; 
		if (place.style.position!="relative") {
			// note: 'var' is function-scoped, so these redeclarations simply
			// overwrite the left/top defaults above with absolute coordinates
			var left=findPosX(btn);
			var top=findPosY(btn)+btn.offsetHeight;
			// if nested inside another floating panel, make the coordinates
			// relative to that containing panel (see revision 1.7.8 above)
			var p=place; while (p && p.className!='floatingPanel') p=p.parentNode;
			if (p) { left-=findPosX(p); top-=findPosY(p); }
		}
		// keep the panel from extending past the right edge of the window
		if (left+panel.offsetWidth > getWindowWidth()) left=getWindowWidth()-panel.offsetWidth-10;
		panel.style.left=left+"px"; panel.style.top=top+"px";
	}
}

// Best-effort cross-browser viewport width.  Tries, in order: document.width
// (moz/FF), documentElement.clientWidth (IE6 strict), body.clientWidth (IE4),
// then window.innerWidth; returns 0 when no measurement is available.
function getWindowWidth() {
	if (document.width != undefined)
		return document.width; // moz (FF)
	var docEl = document.documentElement;
	if (docEl && (docEl.clientWidth || docEl.clientHeight))
		return docEl.clientWidth; // IE6
	var body = document.body;
	if (body && (body.clientWidth || body.clientHeight))
		return body.clientWidth; // IE4
	if (window.innerWidth != undefined)
		return window.innerWidth; // IE - general
	return 0; // unknown
}
//}}}
* [[Class Syllabus|syllabus/NCS205Syllabus2401.pdf]]
* [[General SOPs]]
* [[Lab Assignments]]
* [[Class Participation]]
** [[Using Discord]]
* [[Shell script submission requirements]]
* [[Shell scripting best practices]]
* [[Material Sections]]
/% ** [[Using Blackboard]] %/

Other helpful material to make things easier:
* [[Working more efficiently with GNU screen & SSH keys]]
* [[Tunnels & Proxies with SSH]]


!!Handouts
[[Command line summary handout|handouts/UnixCommandSummary.pdf]]
[[Substitution Handout|handouts/SubstitutionHandout.pdf]] (from tcsh man page)
[[ASCII Chart|handouts/ascii-chart.gif]]
[[Shell Metacharacter Table|handouts/ShellMetacharacterTable.pdf]]
[[Regular expression metacharacters]]
* [[Metacharacter Handout|handouts/Metacharacters.pdf]] - Metacharacters and how they differ in the shell & regular expression contexts.
[[vi diagram handout|handouts/viDiagram.pdf]]
[[awk handout|handouts/awkHandout.pdf]]


!!Reference Material
[[Class technology stack]] - Mostly my notes for setting up our class servers
[[UNIX in a Nutshell|http://books.google.com/books?id=YkNiiLupct4C&dq=unix+in+a+nutshell&printsec=frontcover&source=bn&hl=en&ei=aKlWS43lJJCOlQeW3rSCBA&sa=X&oi=book_result&ct=result&resnum=5&ved=0CCIQ6AEwBA#v=onepage&q=&f=false]] - Google books
[[The Linux Command Line (No Starch Press)|http://www.merantn.net/reference/TLCL-19.01.pdf]]
[[UNIX Toolbox|https://merantn.net/docs/unixtoolbox.xhtml]]
[[Shell scripting notes]]
[[Table of Commands]]
** [[less quick reference]]
[[Linux Shortcuts]]
/***
|Name|OpenTopPlugin|
|Created by|SaqImtiaz|
|Location|http://lewcid.googlepages.com/lewcid.html#OpenTopPlugin|
|Version|0.1|
|Requires|~TW2.x|
!!!Description:
Open new tiddlers at the top of the screen.

!!!Code
***/
//{{{
// OpenTopPlugin: hijack Story.displayTiddler so that displaying a tiddler
// first closes any already-open copy, delegates to the core handler, then
// scrolls the window to the top.
Story.prototype.coreLewcidDisplayTiddler=Story.prototype.displayTiddler ;
Story.prototype.displayTiddler =
function(srcElement,title,template,unused1,unused2,animate,slowly)
{
       // NOTE(review): srcElement is deliberately discarded (set to null),
       // presumably so the core handler opens the tiddler at the top of the
       // story rather than relative to the clicked element -- confirm
       var srcElement=null;
       if (document.getElementById(this.idPrefix + title))
          {story.closeTiddler(title);}
       this.coreLewcidDisplayTiddler(srcElement,title,template,unused1,unused2,animate,slowly);
       window.scrollTo(0,0);
}
//}}}
<<option chkSaveBackups>> SaveBackups
<<option chkAutoSave>> AutoSave
<<option chkRegExpSearch>> RegExpSearch
<<option chkCaseSensitiveSearch>> CaseSensitiveSearch
<<option chkAnimate>> EnableAnimations
----
Also see AdvancedOptions
[[Week 3, Part 2]] - Home Directories & Shell documentation
[[Week 3, Part 1]] - Links & File Globbing
* Links:
** Read 
*** Chapter 3, pp 23 & 24
*** Chapter 4, pp 33 & 34
** Watch:
*** Links - https://www.youtube.com/watch?v=lW_V8oFxQgA
** Complete: [[Lab 8|labs/lab8.pdf]]
* File Globbing:
** Read: Chapter 4, pp 25-27 (Wildcards)
** Watch: 
*** File Globbing: https://www.youtube.com/watch?v=QIysdjpiLcA
*** Brace Expansion: https://www.youtube.com/watch?v=LGzSnVYS2J4
** Complete:  [[Lab 9|labs/lab9.pdf]] & [[Lab 10|labs/lab10.pdf]]
----
[[Week 2, Part 2]] - Manipulating Files & Directories
* Review: Complete [[Lab 5|labs/lab5.pdf]]
* Read Chapter 4 in [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]]
** You can gloss over the parts about wildcards (pp 26-27) for now.  We'll come back to them later.
** Focus on becoming familiar with the available commands.
* Watch:
** Creating and Deleting files and directories: https://www.youtube.com/watch?v=91FhiTyEaCU
** Moving and copying files: https://www.youtube.com/watch?v=GKEGNdNIQrw
* Complete [[Lab 6|labs/lab6.pdf]] & [[Lab 7|labs/lab7.pdf]]

[[Week 2, Part 1]] - Exploring the system 
* Read Chapter 3 in [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]]
* Complete [[Lab 3|labs/lab3.pdf]] & [[Lab 4|labs/lab4.pdf]]
----
[[Week 1, Part 2]] - The Filesystem
* Read Chapter 2 in [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]] - Navigation
* Complete [[Lab 2|labs/Lab2.pdf]]

[[Week 1, Part 1]] - Unix Intro
* Read Chapter 1 in [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]] - What is the Shell?
* Complete [[Lab 1|labs/Lab1.pdf]]
<!--{{{-->
<div class='header' macro='gradient vert #000 #069'>
<div id='topTitle' class='headerShadow'>
<span class='siteTitle' refresh='content' tiddler='SiteTitle'></span>&nbsp;
<span class='siteSubtitle' refresh='content' tiddler='SiteSubtitle'></span>
</div>
<div id='topTitle' class='headerForeground'>
<span class='siteTitle' refresh='content' tiddler='SiteTitle'></span>&nbsp;&nbsp;&nbsp;
<span class='siteSubtitle' refresh='content' tiddler='SiteSubtitle'></span>
</div>
<div id='topMenu' refresh='content' tiddler='MainMenu'></div>
<div id='rightMenu' refresh='content' tiddler='RightMenu'></div>
</div>
<div id='sidebar'>
<div id='sidebarOptions' refresh='content' tiddler='SideBarOptions'></div>
<div id='sidebarTabs' refresh='content' force='true' tiddler='SideBarTabs'></div>
</div>
<div id='displayArea'>
<div id='messageArea'></div>
<div id='tiddlerDisplay'></div>
</div>
<!--}}}-->
// "Home" button click handler: close all open tiddlers, clear any saved
// txtDefaultTiddlers override (and persist that via its cookie), then display
// the tiddlers listed in the [[DefaultTiddlers]] tiddler.
function onClickDefaultHome(e) {
story.closeAllTiddlers();
config.options.txtDefaultTiddlers = "";
saveOptionCookie('txtDefaultTiddlers');
var start = store.getTiddlerText("DefaultTiddlers");
if(start)
story.displayTiddlers(null,start.readBracketedList());
}

// <<defaultHome>> macro: renders a button wired to onClickDefaultHome above
config.macros["defaultHome"] = {label: "Home", prompt: "Show the default tiddlers", title: "Home"};
config.macros.defaultHome.handler = function(place) {
createTiddlyButton(place,this.label,this.prompt,onClickDefaultHome);

}
// <<listTags tagname>> macro: render a bulleted list of links to every
// tiddler tagged with the tag given as the first parameter.
// NOTE(review): the 'text' property is not read by the handler below --
// it appears unused; confirm before removing
config.macros.listTags = { text: "Hello" };
config.macros.listTags.handler = function(place,macroName,params)
{
 var tagged = store.getTaggedTiddlers(params[0]);
 var ul = createTiddlyElement(place,"ul",null,null,"");
 for(var r=0;r<tagged.length;r++)
 {
 var li = createTiddlyElement(ul,"li",null,null,"");
 createTiddlyLink(li,tagged[r].title,true);
 }
}
Type the text for 'Plugins'
<html>
<center>
  <video id="my-video" class="video-js" controls preload="auto" width="466" height="448" poster="" data-setup="{}">
    <source src="video/PuttyProxy.mp4" type='video/mp4'>
    <p class="vjs-no-js">
      To view this video please enable JavaScript, and consider upgrading to a web browser that
      <a href="http://videojs.com/html5-video-support/" target="_blank">supports HTML5 video</a>
    </p>
  </video>

  <script src="https://vjs.zencdn.net/7.8.2/video.min.js"></script>
</center>
</html>
/***
|Name:|QuickOpenTagPlugin|
|Description:|Changes tag links to make it easier to open tags as tiddlers|
|Version:|3.0.1 ($Rev: 3861 $)|
|Date:|$Date: 2008-03-08 10:53:09 +1000 (Sat, 08 Mar 2008) $|
|Source:|http://mptw.tiddlyspot.com/#QuickOpenTagPlugin|
|Author:|Simon Baird <simon.baird@gmail.com>|
|License:|http://mptw.tiddlyspot.com/#TheBSDLicense|
***/
//{{{
// QuickOpenTagPlugin: replaces the core tag button with a "quick open"
// version -- the tag name is rendered as a normal tiddly link (click to open
// the tag as a tiddler) followed by a small dropdown-arrow button showing the
// usual tagged-tiddlers popup.  Also replaces <<allTags>> and adds <<miniTag>>.
config.quickOpenTag = {

	// dropdown arrow glyph; the little one doesn't work in IE?
	dropdownChar: (document.all ? "\u25bc" : "\u25be"),

	// Build the two-part tag button: a tiddly link for the tag itself plus a
	// dropdown button wired to the core onClickTag popup.  'tag' may be
	// written as "PrettyTagName|RealTagName"; 'excludeTiddler' (optional) is
	// stored on the button so the popup can omit that tiddler from its list.
	// Returns the dropdown button element.
	createTagButton: function(place,tag,excludeTiddler) {
		// little hack so we can do this: <<tag PrettyTagName|RealTagName>>
		var splitTag = tag.split("|");
		var pretty = tag;
		if (splitTag.length == 2) {
			tag = splitTag[1];
			pretty = splitTag[0];
		}

		var sp = createTiddlyElement(place,"span",null,"quickopentag");
		createTiddlyText(createTiddlyLink(sp,tag,false),pretty);

		var theTag = createTiddlyButton(sp,config.quickOpenTag.dropdownChar,
			config.views.wikified.tag.tooltip.format([tag]),onClickTag);
		theTag.setAttribute("tag",tag);
		if (excludeTiddler)
			theTag.setAttribute("tiddler",excludeTiddler);
		return(theTag);
	},

	// <<miniTag>> macro handler: if the current tiddler is itself used as a
	// tag on other tiddlers, show a small dropdown button for its popup.
	miniTagHandler: function(place,macroName,params,wikifier,paramString,tiddler) {
		var tagged = store.getTaggedTiddlers(tiddler.title);
		if (tagged.length > 0) {
			var theTag = createTiddlyButton(place,config.quickOpenTag.dropdownChar,
				config.views.wikified.tag.tooltip.format([tiddler.title]),onClickTag);
			theTag.setAttribute("tag",tiddler.title);
			theTag.className = "miniTag";
		}
	},

	// Replacement <<allTags>> handler: list every tag as a link plus usage
	// count plus dropdown button.  params[1] (optional) filters tags by a
	// regular expression anchored at the start of the tag name.
	allTagsHandler: function(place,macroName,params) {
		var tags = store.getTags(params[0]);
		var filter = params[1]; // new feature
		var ul = createTiddlyElement(place,"ul");
		if(tags.length == 0)
			createTiddlyElement(ul,"li",null,"listTitle",this.noTags);
		for(var t=0; t<tags.length; t++) {
			var title = tags[t][0];
			if (!filter || (title.match(new RegExp('^'+filter)))) {
				var info = getTiddlyLinkInfo(title);
				var theListItem =createTiddlyElement(ul,"li");
				var theLink = createTiddlyLink(theListItem,tags[t][0],true);
				var theCount = " (" + tags[t][1] + ")";
				theLink.appendChild(document.createTextNode(theCount));
				var theDropDownBtn = createTiddlyButton(theListItem," " +
					config.quickOpenTag.dropdownChar,this.tooltip.format([tags[t][0]]),onClickTag);
				theDropDownBtn.setAttribute("tag",tags[t][0]);
			}
		}
	},

	// todo fix these up a bit
	// (fix: three selectors below previously misspelled "tiddlyLink" as
	// "tiddyLink" -- compare the correctly-spelled selectors further down --
	// so their { border:0 } rule never matched anything)
	styles: [
"/*{{{*/",
"/* created by QuickOpenTagPlugin */",
".tagglyTagged .quickopentag, .tagged .quickopentag ",
"	{ margin-right:1.2em; border:1px solid #eee; padding:2px; padding-right:0px; padding-left:1px; }",
".quickopentag .tiddlyLink { padding:2px; padding-left:3px; }",
".quickopentag a.button { padding:1px; padding-left:2px; padding-right:2px;}",
"/* extra specificity to make it work right */",
"#displayArea .viewer .quickopentag a.button, ",
"#displayArea .viewer .quickopentag a.tiddlyLink, ",
"#mainMenu .quickopentag a.tiddlyLink, ",
"#mainMenu .quickopentag a.tiddlyLink ",
"	{ border:0px solid black; }",
"#displayArea .viewer .quickopentag a.button, ",
"#mainMenu .quickopentag a.button ",
"	{ margin-left:0px; padding-left:2px; }",
"#displayArea .viewer .quickopentag a.tiddlyLink, ",
"#mainMenu .quickopentag a.tiddlyLink ",
"	{ margin-right:0px; padding-right:0px; padding-left:0px; margin-left:0px; }",
"a.miniTag {font-size:150%;} ",
"#mainMenu .quickopentag a.button ",
"	/* looks better in right justified main menus */",
"	{ margin-left:0px; padding-left:2px; margin-right:0px; padding-right:0px; }",
"#topMenu .quickopentag { padding:0px; margin:0px; border:0px; }",
"#topMenu .quickopentag .tiddlyLink { padding-right:1px; margin-right:0px; }",
"#topMenu .quickopentag .button { padding-left:1px; margin-left:0px; border:0px; }",
"/*}}}*/",
		""].join("\n"),

	init: function() {
		// we fully replace these builtins. can't hijack them easily
		window.createTagButton = this.createTagButton;
		config.macros.allTags.handler = this.allTagsHandler;
		config.macros.miniTag = { handler: this.miniTagHandler };
		config.shadowTiddlers["QuickOpenTagStyles"] = this.styles;
		store.addNotification("QuickOpenTagStyles",refreshStyles);
	}
}

config.quickOpenTag.init();

//}}}

| !Symbol | !Meaning | !Escape | !Not supported by |
| ^ |Start of line| | |
| $ |End of line| | |
| [ ] |Character Classes (match any one character listed) | | |
|~|Characters may be specified singly or in ranges| | |
| [^ ] |Negated character class (match any one character not listed)| | |
| ? |Optional item.  Match 0 or 1. | | sed |
| ( ) |Alternation (match any one of the sub-expressions)| | |
|~|Grouping| | |
|~|Capture backreference.  Access with \//n//| * | |
| {{{|}}} |Or.  Match either expression it separates.  Use with ( )| | |
| . |Any single character| | |
| + |Repetition:  1 or more. | | sed |
| * |Repetition: 0 or more| | |
| { } |Defined range of matches (bounds) {//min//,//max//} or {//min//,} or {//exactly//}| * | |
| \ |Suppress normal behavior of a metacharacter| | |
|~|Access a backreference:  \//n//| | |
| \< |Match start of word.| * | bsd sed |
| \> |Match end of word.| * | bsd sed |


| !Symbol | !File Globbing   | !Regex | !Regex Equivalent |
| ? |Exactly 1|0 or 1| . |
| { } |Sets|# of matches| ( ) |
/***
|Name:|RenameTagsPlugin|
|Description:|Allows you to easily rename or delete tags across multiple tiddlers|
|Version:|3.0 ($Rev: 5501 $)|
|Date:|$Date: 2008-06-10 23:11:55 +1000 (Tue, 10 Jun 2008) $|
|Source:|http://mptw.tiddlyspot.com/#RenameTagsPlugin|
|Author:|Simon Baird <simon.baird@gmail.com>|
|License|http://mptw.tiddlyspot.com/#TheBSDLicense|
Rename a tag and you will be prompted to rename it in all its tagged tiddlers.
***/
//{{{
// RenameTagsPlugin: when a tiddler that is used as a tag is renamed or
// deleted, offer to rename/remove that tag across all tagged tiddlers.
config.renameTags = {

	// confirmation prompts; %n placeholders are filled via String.format
	// (fix: both prompts misspelled "tiddler" as "tidder")
	prompts: {
		rename: "Rename the tag '%0' to '%1' in %2 tiddler%3?",
		remove: "Remove the tag '%0' from %1 tiddler%2?"
	},

	// Remove 'tag' from every tiddler in 'tiddlers'.  Notifications are
	// suspended so the store refreshes only once, at the end.
	removeTag: function(tag,tiddlers) {
		store.suspendNotifications();
		for (var i=0;i<tiddlers.length;i++) {
			store.setTiddlerTag(tiddlers[i].title,false,tag);
		}
		store.resumeNotifications();
		store.notifyAll();
	},

	// Replace 'oldTag' with 'newTag' on every tiddler in 'tiddlers',
	// again batching notifications into a single refresh.
	renameTag: function(oldTag,newTag,tiddlers) {
		store.suspendNotifications();
		for (var i=0;i<tiddlers.length;i++) {
			store.setTiddlerTag(tiddlers[i].title,false,oldTag); // remove old
			store.setTiddlerTag(tiddlers[i].title,true,newTag);  // add new
		}
		store.resumeNotifications();
		store.notifyAll();
	},

	// Hijacks merged into TiddlyWiki.prototype by init() below
	storeMethods: {

		saveTiddler_orig_renameTags: TiddlyWiki.prototype.saveTiddler,

		// On save-with-rename: if the old title is used as a tag, offer to
		// rename the tag everywhere, and avoid creating an empty tiddler
		// that existed only as a tag.
		saveTiddler: function(title,newTitle,newBody,modifier,modified,tags,fields,clearChangeCount,created) {
			if (title != newTitle) {
				var tagged = this.getTaggedTiddlers(title);
				if (tagged.length > 0) {
					// then we are renaming a tag
					if (confirm(config.renameTags.prompts.rename.format([title,newTitle,tagged.length,tagged.length>1?"s":""])))
						config.renameTags.renameTag(title,newTitle,tagged);

					if (!this.tiddlerExists(title) && newBody == "")
						// dont create unwanted tiddler
						return null;
				}
			}
			return this.saveTiddler_orig_renameTags(title,newTitle,newBody,modifier,modified,tags,fields,clearChangeCount,created);
		},

		removeTiddler_orig_renameTags: TiddlyWiki.prototype.removeTiddler,

		// On delete: if the title is used as a tag, offer to strip the tag
		// from all tagged tiddlers before removing the tiddler itself.
		removeTiddler: function(title) {
			var tagged = this.getTaggedTiddlers(title);
			if (tagged.length > 0)
				if (confirm(config.renameTags.prompts.remove.format([title,tagged.length,tagged.length>1?"s":""])))
					config.renameTags.removeTag(title,tagged);
			return this.removeTiddler_orig_renameTags(title);
		}

	},

	init: function() {
		merge(TiddlyWiki.prototype,this.storeMethods);
	}
}

config.renameTags.init();

//}}}

Type the text for 'Resources'
<<toggleSideBar "" "Toggle Sidebar" hide>>
<html>
<center>
  <video id="my-video" class="video-js" controls preload="auto" width="858" height="480" poster="" data-setup="{}">
    <source src="video/ssh.mp4" type='video/mp4'>
    <p class="vjs-no-js">
      To view this video please enable JavaScript, and consider upgrading to a web browser that
      <a href="http://videojs.com/html5-video-support/" target="_blank">supports HTML5 video</a>
    </p>
  </video>

  <script src="https://vjs.zencdn.net/7.8.2/video.min.js"></script>
</center>
</html>
!! Grading

Shell scripting labs will follow a more traditional grading approach where only meeting the objectives of the script will receive a B grade, or 8.5 / 10.  A grade beyond that will require exceeding the minimum expectations.

15% of shell scripting lab grades will be reserved for style, efficiency, and completeness.  /% For example, if the  %/

| !Grade | !Quality |
| A |Exceptional - Exceeds expectations|
| B |Average - Meets expectations|
| C |Satisfactory - meets some expectations|
| D |Poor - minimally meets expectations|
| F |Does not meet minimal expectations|


!! Requirements

The following procedure must be followed for submitting shell scripting labs. Improperly submitted scripting labs will not be accepted.  

The end goal of this process is to submit a single PDF containing these three components:
&nbsp;&nbsp;''a.'' Original lab assignment sheet as a cover page
&nbsp;&nbsp;''b.'' Your shell scripts
&nbsp;&nbsp;''c.'' A demonstration of your scripts


''1.'' Create the directory ~/bin/. Save all lab shell scripts in this directory with the naming convention ''ncs205-lab//xx//-q//yy//.sh'' where ''//xx//'' is the lab number and ''//yy//'' is the question number. It would make things easier for you if you always use two digits for //xx// and //yy//.
I may refer to the script files if I need to execute/test any of your scripts.

''2.'' A proper shebang must be added as the first line of your shell scripts.

''3.'' The following header must be placed at the top of each script file, immediately after the shebang:
{{{
# File name: 
# Author:
# Date Written:
# Assignment:
# Purpose:
# Description:
#
#
#
}}}

The //Purpose// field should contain a brief, one-line summary of what your script is accomplishing.  The Description field should contain more detailed information regarding how it is accomplishing that goal or any additional information helpful to understand the function of your script.


''4.'' Make use of comments throughout your script to document and convey what you're doing.
Long lines should be nicely wrapped with carriage returns. Cut long lines at about column 60. (makes it easier to read and print)
* You can escape the newline with a {{Command{''\''}}} to continue a long line of commands on the next line.  For example:
{{{
dig axfr ${user}.ncs205.net @ns1.${user}.ncs205.net | \
	grep -v ^\;  | sort | md5sum | cut -d " " -f 1
}}}

{{Note{''Note:'' The remaining two steps are for labs which are //only// scripts and do not contain input boxes}}}

''5.'' Use the {{Command{script}}} command to launch a recording shell, saving the output to {{File{~/bin/labxx.raw}}} where //xx// is the lab number. Demonstrate execution of your scripts within this recording shell.
* Execute {{Command{script ~/bin/labxx.raw}}} to start the recording shell, saving output to the filename specified as the first command line argument
* Run your scripts. Everything you type and all output will be recorded in the file {{File{~/bin/labxx.raw}}}.
* Be sure you do not have excessive errors in the recording.  Pressing the backspace key will be recorded as a separate keystroke and make your demonstration harder to read.
* Type {{Command{exit}}} to terminate the recording shell.
* If you examine the {{File{~/bin/labxx.raw}}}, you will see it contains a lot of control characters.  The {{Command{ ansifilter }}} command will remove them.
** {{Command{ ansifilter -o ~/bin/labxx.out ~/bin/labxx.raw }}}

''6.'' Create a PDF of your scripts to save to the {{File{/opt/pub/ncs205/submit/}}} directory:
* The comments below explain what's going on.  
* The first paragraph only explains the {{Command{enscript}}} command.  The second paragraph contains the two commands you'll need to execute to submit your lab.

{{{
# enscript is a great tool for formatting documents about to be printed or saved as a PDF.
# The following command string will gather your labs and the output from the demo of your scripts, apply some 
# document formatting, and display PostScript on STDOUT.
# The -o - option for enscript instructs the command to send its output to STDOUT instead of saving it to a file
enscript -2jr -C -o - ~/bin/ncs205-labxx-q??.sh ~/bin/labxx.out

# PostScript is mostly the language of printers and isn't as useful on modern PCs. Instead of working with 
# native PostScript or displaying STDOUT to the screen, let's convert it to PDF and save to a file.
# Caution! Only run this command when you are ready to submit your scripts. 
# *** This is the command you will execute to submit your scripting labs ***
enscript -2jr -C -o - ~/bin/ncs205-labxx-q??.sh ~/bin/labxx.out | ps2pdf - ~/bin/ncs205-labxx-username.pdf
# Note: The - in the above ps2pdf command instructs the command to obtain its input from STDIN.
# The next command will combine the lab assignment PDF as a cover page with the PDF you just created containing your scripts, 
# saving the output to the class submit directory.  This is the PDF you are submitting for my review.
cpdf /opt/pub/ncs205/labs/labxx.pdf ~/bin/ncs205-labxx-username.pdf -o /opt/pub/ncs205/submit/ncs205-labxx-username.pdf

# Be sure to follow the standard lab naming scheme and change the xx and username to proper values
# The nice thing about using standard naming conventions is it makes everything easy to script. 
# Rather than have to search for these commands for every scripting lab you need to submit, you might as well make a dynamic script out of it.
# (Hint: This will be a future assignment.  It'll be more useful to you if you start working on it now.)
}}}

''7.''  Preview your submitted PDF
> Download the PDF saved to the {{File{submit/}}} directory to check your work.  If you skip this important step and submit a PDF for review that does not contain your scripts, you will either receive no credit for the lab or a late penalty for resubmitting.

{{Note{''Note:'' The video below demonstrates the deprecated {{Command{a2ps}}} command.  The new process instead uses {{Command{enscript}}} which is a drop-in replacement.  The video has not yet been updated to reflect this change}}}

<html>
<center>
  <video id="my-video" class="video-js" controls preload="auto" width="1000" height="662" poster="" data-setup="{}">
    <source src="video/scripts.mp4" type='video/mp4'>
    <p class="vjs-no-js">
      To view this video please enable JavaScript, and consider upgrading to a web browser that
      <a href="http://videojs.com/html5-video-support/" target="_blank">supports HTML5 video</a>
    </p>
  </video>

  <script src="https://vjs.zencdn.net/7.8.2/video.min.js"></script>
</center>
</html>
The following general best practices will make it much easier for you to write your scripts and help ensure they're correct.  Good practices will also help others understand what your scripts are doing.  

!! Items to include within your script:

1. Start your script with a proper shebang.
<<<
The first line of your script, the shebang (shell bang because it starts with a {{Monospace{#!}}}) must contain the shell interpreter to use.  Since these scripts are otherwise just text files, this line indicates what language your script was written in.  Bourne shell scripts should have this on the first line:  {{Monospace{''#!/bin/sh''}}}
<<<

2. Provide clarity
<<<
You may not be the only one using your script.  Others may have to look at the code to troubleshoot or make modifications later.  Or you may have to come back years later to decipher what you did and what your past self was thinking.  Good usability should be built into everything you do.  Be sure your code is well laid out and clear to follow.  If you make it so I have a hard time understanding your logic and workflow with these simple scripts when I know the objective, then others will surely have difficulty understanding more complex ones later.
<<<

3. Add proper header information to your script
<<<
Identifying the script author, creation date, and purpose at the top of your script is important.  I encounter far too many scripts written by others at my day job which lack this detail.  Who do I go to if there's issues with this script?  How old is this?  Is it ancient and not valid anymore as processes have changed?
<<<

4. Add comments to explain what you are doing and the purpose of each section
<<<
Comments should be utilized to explain what you are doing or your methodology if the command itself does not make it clear.  Simple and obvious commands and processes may be self-documenting.  Others should have comments to explain them.  It's also helpful to cut your comments at about 60 characters so they don't wrap to the next line in the terminal or PDF copy.

Comments should be concise and professional.  Unnecessary verbosity can cause your meaning to be lost.
<<<

5. Cut long lines
<<<
Very long command lines can be cut and extended to the next line to help readability.  The backslash can be used to escape the newline if it's the last character in the line.  By escaping the newline, your long command string can then continue on the next line.  

Consider these two examples.  Notice the use of the backslash in the long command string.
{{{
# Combine lab sheet as a cover page with PDF containing shell 
# scripts, saving output to submit directory.
cpdf /opt/pub/ncs205/labs/labxx.pdf ~/bin/ncs205-labxx-username.pdf -o /opt/pub/ncs205/submit/ncs205-labxx-username.pdf
}}}
{{{
# Combine lab sheet as a cover page with PDF containing shell 
# scripts, saving output to submit directory.
cpdf /opt/pub/ncs205/labs/labxx.pdf ~/bin/ncs205-labxx-username.pdf \
	-o /opt/pub/ncs205/submit/ncs205-labxx-username.pdf
}}}
<<<

6. Use meaningful variable names
<<<
Choose descriptive variable names to make your code self-documenting. If your variable should contain a file name, the name of the variable should be something like {{Monospace{''//filename//''}}}, not something generic like {{Monospace{''//var//''}}}.  Also avoid using one-letter variable names (e.g., {{Monospace{x}}} or {{Monospace{i}}}) when possible.
<<<

7. Use double quotes with variables (when you can)
<<<
Enclose variables in double quotes to handle spaces and special characters correctly, especially when using variables within if-statements.  There may be times you cannot use quotes, but these are the exception and not the norm.
<<<

8. Properly indent your code
<<<
Follow a consistent and readable indentation style to make your code visually appealing and logic easier to follow.  Any control structures should be properly indented.

Consider these two small examples.  The first with no indentation and the second with proper indentation.  The issue becomes more pronounced as control structures become nested and larger.
{{{
if [ -n "$note" ]
then
echo $note
fi
}}}
{{{
if [ -n "$note" ]
then
	echo $note
fi
}}}
<<<

9. Avoid hardcoding values
<<<
Minimize hardcoding values in your script. Use variables or configuration files to store and manage settings.  For example, if you have to refer to a file later in your script, perhaps defining a variable at the top of the script would make the script easier to maintain?
<<<

10. Avoid code duplication
<<<
Avoid using the same block of code in multiple places within your script.  If you find that you have done this, then look at ways to either adjust the script's logic or approach to remove the code reuse.  We aren't covering functions here, but proper use of functions can typically help with this in larger scripts.
<<<

!! Script validation and debugging

1. Debugging statements may be helpful
<<<
You can run your script with {{Monospace{bash -x}}} to see the actual lines that are being executed in your script, after any substitutions occur.  This can be a great tool for identifying why something isn't working out the way it should.
{{{
bash -x scriptname.sh
}}}

Debugging {{Monospace{echo}}} statements throughout your script can help show any intermediate values as they're being used.  This way we can more easily visualize what these variables contain as the script is executing.
{{{
echo DEBUG:  $mdate : $mtime : $oneday : $threeday
}}}
Just remember to remove or comment them before submitting the script
<<<

2. Test your script
<<<
Write test cases to ensure your script works as expected, especially when making changes or updates.  Be sure to test different scenarios and test for failure.  Don't just provide what's expected.  What happens if unexpected input is provided?

We won't have all the tools to handle this when we start our shell scripting work, but we should by the end.
<<<

3.  Use version control or make backups
<<<
Keep your scripts in a version control system (e.g., Git) to track changes and easily roll back to an old version if you need to.

Even if you don't use a full version control system, always have a back-out plan if a change doesn't work.  Maybe back up the file before making changes so you can revert or comment out the old line until you're sure it's no longer needed.  My convention is to append a timestamp of the backup to the filename in the format - ~YYMMDD-HHMM.  For example, 230320-2244 for 10:44pm on Mar 20.  This timestamp then also sorts well in a directory listing.  The {{Command{diff}}} command can be used to compare different versions of a file.

The same concept works well for configuration files in our next class material.
<<<
*Shell scripting quick reference:  http://www.math.uga.edu/~caner/08reu/shell.html
*Awk one liners:  http://www.catonmat.net/blog/wp-content/uploads/2008/09/awk1line.txt
*Sed one liners:  http://www.catonmat.net/blog/wp-content/uploads/2008/09/sed1line.txt
<<search>><<closeAll>><<collapseAll>><<expandAll>><<permaview>><<newTiddler>><<saveChanges>><<slider chkSliderOptionsPanel OptionsPanel "options »" "Change TiddlyWiki advanced options">><<slider chkSliderContents [[TabContents]] 'contents »' 'contents'>>
/*{{{*/
/* SideBarStyles tiddler: overrides for the sidebar, its option
   buttons, slider panels, and sidebar tabs.
   The -moz-border-radius-* properties are legacy Gecko prefixes
   (pre-standard border-radius), kept for the wiki's original
   target browsers. Rule order matters to the cascade; do not
   reorder. */

#sidebar {
 color: #000;
 background: transparent;
}

#sidebarOptions {
 background: #fff;
}

#sidebarOptions .button {
 color: #999;
}

#sidebarOptions .button:hover {
 color: #000;
 background: #fff;
 border-color:white;
}

#sidebarOptions .button:active {
 color: #000;
 background: #fff;
}

#sidebarOptions .sliderPanel {
 background: transparent;
}

#sidebarOptions .sliderPanel A:hover {
 color: #000;
 background: #fff;
}

#sidebarOptions .sliderPanel A:active {
 color: #000;
 background: #fff;
}

.sidebarSubHeading {
 color: #000;
}

#sidebarOptions .sliderPanel .tabSelected{
  border: 1px solid #ccc;
  background-color: #fff;
  margin: 0px;
  padding-top: 5px;
  padding-bottom: 0px;
  padding-left: 2px;
  padding-right: 2px;
  -moz-border-radius-topleft: 1em;
  -moz-border-radius-topright: 1em;}

#sidebarOptions .sliderPanel .tabUnselected{
  border:    1px solid #ccc;
  background-color: #eee;
  margin: 0px;
  padding-top: 5px;
  padding-bottom: 0px;
  padding-left: 2px;
  padding-right: 2px;
  -moz-border-radius-topleft: 1em;
  -moz-border-radius-topright: 1em;}

#sidebarTabs .tabContents .tiddlyLink:hover {
 background: #fff;
 color: #000;
}

#sidebarTabs .tabContents {
 color: #000;
}

#sidebarTabs .button {
 color: #666;
  border-top:    1px solid #ccc;
  border-left:   1px solid #ccc;
  border-bottom: 2px solid #ccc;
  border-right:  2px solid #ccc;
}

#sidebarTabs .tabContents .button:hover {
 color: #000;
 background: #fff;
}

/* NOTE(review): this .tagging/.tagged rule also appears in the
   main stylesheet tiddler later in the file — presumably
   intentional per-tiddler duplication; verify before removing. */
.tagging, .tagged {
  padding: 0.5em;
  background-color: #eee;
  border-top:    1px solid #ccc;
  border-left:   1px solid #ccc;
  border-bottom: 3px solid #ccc;
  border-right:  3px solid #ccc;
  -moz-border-radius: 1em; }


/*}}}*/

Spring 2024 Course Notes
Introduction to Linux - SUNY Polytechnic Institute NCS 205
[[HorizontalMainMenuStyles]]
[[SideBarStyles]]
[[TagglyTaggingStyles]]

/*{{{*/
/* Main StyleSheet tiddler: page-wide typography, tiddler frames,
   tabs, and the message area. [[ColorPalette::...]] slices are
   substituted by TiddlyWiki at render time. */

body {
  background: #eee; }

/* Two h1 rules follow; the second (palette-colored) one wins for
   the properties it sets because it appears later in the cascade. */
h1 {font-size:2.0em; }
h2 { color: #000; background: transparent; text-decoration: underline; }
h3 { margin: 0.0em; color: #000; background: transparent; }
h4,h5 { color: #000; background: transparent; }

h1 {
        margin: 4px 0 4px 0;
	padding: 5px;
	color: [[ColorPalette::PrimaryDark]];
	background: [[ColorPalette::PrimaryPale]];
}

ul {
	margin-top: 0;
	margin-bottom: 0;
}

.headerShadow {
  padding: 1.0em; }

.headerForeground {
  padding: 1.0em; }

.selected .tagging, .selected .tagged {
  padding: 0.5em;
  background-color: #eee;
  border-top:    1px solid #ccc;
  border-left:   1px solid #ccc;
  border-bottom: 3px solid #ccc;
  border-right:  3px solid #ccc;
  -moz-border-radius: 1em; }

.shadow .title {
  color: #999; }

.siteTitle {
  font-size: 2.5em; }

.siteSubtitle {
  font-size: 1.0em; }

.subtitle {
	font-size: 0.8em;
}

/* NOTE(review): identical to the .selected variant above and to a
   rule in SideBarStyles — presumably intentional; verify. */
.tagging, .tagged {
  padding: 0.5em;
  background-color: #eee;
  border-top:    1px solid #ccc;
  border-left:   1px solid #ccc;
  border-bottom: 3px solid #ccc;
  border-right:  3px solid #ccc;
  -moz-border-radius: 1em; }

/* Raised-box look for each displayed tiddler. */
.tiddler {
  border-top:    1px solid #ccc;
  border-left:   1px solid #ccc;
  border-bottom: 3px solid #ccc;
  border-right:  3px solid #ccc;
  margin: 0.5em;
  background:#fff;
  padding: 0.5em;
  -moz-border-radius: 1em; }

.title {
  color:black;
  font-size: 1.5em; }


.tabSelected{
  padding-top: 0.0em;
  padding-left: 0.5em;
  padding-right: 0.5em;
  -moz-border-radius-topleft: 0.5em;
  -moz-border-radius-topright: 0.5em;}

.tabUnselected {
  padding-top: 0.0em;
  padding-left: 0.5em;
  padding-right: 0.5em;
  -moz-border-radius-topleft: 0.5em;
  -moz-border-radius-topright: 0.5em;}

.tabContents {
  margin: 0px;
  padding-top: 0px;
  padding-bottom: 0px;
  padding-left: 2px;
  padding-right: 2px;
  -moz-border-radius: 1em; }

.viewer .listTitle {
  list-style-type: none;
}

/* Code/pre blocks inside rendered tiddlers. */
.viewer pre {
  background-color: #f8f8ff;
  border-color: #ddf; }

#messageArea { background-color:#bde; border-color:#8ab; border-width:4px; border-style:dotted; font-size:90%; }
#messageArea .button { text-decoration:none; font-weight:bold; background:transparent; border:0px; }
#messageArea .button:hover {background: #acd;}
/*}}}*/

/*{{{*/
/* Course-specific inline classes used in wiki markup as
   {{Command{...}}}, {{File{...}}}, {{Note{...}}}, etc. */
.Command{color: fuchsia;font-size: 10pt;font-family: Courier, monospace;margin-left: 2px;margin-right: 2px;}
.Commandi{color: fuchsia;font-size: 10pt;font-family: Courier, monospace;margin-left: 20px;margin-right: 2px;}
.File{color: #4c7fbc;font-size: 10pt;font-family: Courier, monospace;margin-left: 2px;margin-right: 2px; font-weight:bold;}
.Remove{background-color: orange}
.Host{color: #0f9791;font-size: 10pt;font-family: Courier, monospace;margin-left: 2px;margin-right: 2px; font-weight:bold;}
.Note{display:block;background-color:#e9ffdb;border:1px solid darkgreen;margin: 0 2em 0 2em;padding:5px 5px 5px 5px;}
.Warning{display:block;background-color:#ffee88; border:2px solid darkorange;margin: 0 2em 0 2em;padding:5px 5px 5px 5px;}
/* NOTE(review): the prose in this file uses {{Monospace{...}}} but
   only .Monospaced is defined here — looks like a name mismatch;
   confirm whether another tiddler defines .Monospace. */
.Monospaced{font-size: 10pt;font-family: Courier, monospace;margin-left: 2px;margin-right: 2px;}
.Commands{background-color:#F0F0FF; font-size: 10pt;font-family: Courier, monospace;margin-left: 2px;margin-right: 2px;padding:5px 5px 5px 5px;}


/*}}}*/

/* Layout helpers for the sidebar toggle button and centered tables
   inside the viewer. */
 .HideSideBarButton {margin-left: 3em;}

.viewer div.centeredTable {
	text-align: center;
}

.viewer div.centeredTable table {
	margin: 0 auto;
	text-align: left;
}

.viewer table.borderless,
.viewer table.borderless * {
	border: 0;
}
/*{{{*/
/* Palette-driven base styles: TiddlyWiki substitutes each
   [[ColorPalette::...]] slice with the value from the ColorPalette
   tiddler at refresh time. */
body {background:[[ColorPalette::Background]]; color:[[ColorPalette::Foreground]];}

a {color:[[ColorPalette::PrimaryMid]];}
a:hover {background-color:[[ColorPalette::PrimaryMid]]; color:[[ColorPalette::Background]];}
a img {border:0;}
/* Palette heading colors and underline borders. */
h1,h2,h3,h4,h5,h6 {color:[[ColorPalette::SecondaryDark]]; background:transparent;}
/* Fixed: removed a stray close-comment token that trailed the h1
   rule; it had no matching open-comment and is invalid CSS, which
   can cause parsers to drop the following rules. */
h1 {border-bottom:2px solid [[ColorPalette::PrimaryLight]];}
h2,h3 {border-bottom:1px solid [[ColorPalette::TertiaryLight]];}
/* Palette-driven chrome: buttons, header, tabs, sidebar, wizard,
   message area, popups, tagging boxes, viewer tables, editor, and
   backstage. Rule order matters to the cascade; do not reorder. */

.button {color:[[ColorPalette::PrimaryDark]]; border:1px solid [[ColorPalette::Background]];}
.button:hover {color:[[ColorPalette::PrimaryDark]]; background:[[ColorPalette::SecondaryLight]]; border-color:[[ColorPalette::SecondaryMid]];}
.button:active {color:[[ColorPalette::Background]]; background:[[ColorPalette::SecondaryMid]]; border:1px solid [[ColorPalette::SecondaryDark]];}

.header {background:[[ColorPalette::PrimaryMid]];}
.headerShadow {color:[[ColorPalette::Foreground]];}
.headerShadow a {font-weight:normal; color:[[ColorPalette::Foreground]];}
.headerForeground {color:[[ColorPalette::Background]];}
.headerForeground a {font-weight:normal; color:[[ColorPalette::PrimaryPale]];}

.tabSelected{color:[[ColorPalette::PrimaryDark]];
	background:[[ColorPalette::TertiaryPale]];
	border-left:1px solid [[ColorPalette::TertiaryLight]];
	border-top:1px solid [[ColorPalette::TertiaryLight]];
	border-right:1px solid [[ColorPalette::TertiaryLight]];
}
.tabUnselected {color:[[ColorPalette::Background]]; background:[[ColorPalette::TertiaryMid]];}
.tabContents {color:[[ColorPalette::PrimaryDark]]; background:[[ColorPalette::TertiaryPale]]; border:1px solid [[ColorPalette::TertiaryLight]];}
.tabContents .button {border:0;}

#sidebar {}
#sidebarOptions input {border:1px solid [[ColorPalette::PrimaryMid]];}
#sidebarOptions .sliderPanel {background:[[ColorPalette::PrimaryPale]];}
#sidebarOptions .sliderPanel a {border:none;color:[[ColorPalette::PrimaryMid]];}
#sidebarOptions .sliderPanel a:hover {color:[[ColorPalette::Background]]; background:[[ColorPalette::PrimaryMid]];}
#sidebarOptions .sliderPanel a:active {color:[[ColorPalette::PrimaryMid]]; background:[[ColorPalette::Background]];}

/* Import/export wizard dialogs. */
.wizard {background:[[ColorPalette::PrimaryPale]]; border:1px solid [[ColorPalette::PrimaryMid]];}
.wizard h1 {color:[[ColorPalette::PrimaryDark]]; border:none;}
.wizard h2 {color:[[ColorPalette::Foreground]]; border:none;}
.wizardStep {background:[[ColorPalette::Background]]; color:[[ColorPalette::Foreground]];
	border:1px solid [[ColorPalette::PrimaryMid]];}
.wizardStep.wizardStepDone {background:[[ColorPalette::TertiaryLight]];}
.wizardFooter {background:[[ColorPalette::PrimaryPale]];}
.wizardFooter .status {background:[[ColorPalette::PrimaryDark]]; color:[[ColorPalette::Background]];}
.wizard .button {color:[[ColorPalette::Foreground]]; background:[[ColorPalette::SecondaryLight]]; border: 1px solid;
	border-color:[[ColorPalette::SecondaryPale]] [[ColorPalette::SecondaryDark]] [[ColorPalette::SecondaryDark]] [[ColorPalette::SecondaryPale]];}
.wizard .button:hover {color:[[ColorPalette::Foreground]]; background:[[ColorPalette::Background]];}
.wizard .button:active {color:[[ColorPalette::Background]]; background:[[ColorPalette::Foreground]]; border: 1px solid;
	border-color:[[ColorPalette::PrimaryDark]] [[ColorPalette::PrimaryPale]] [[ColorPalette::PrimaryPale]] [[ColorPalette::PrimaryDark]];}

/* Sync-status highlight colors used by the wizard's file list. */
.wizard .notChanged {background:transparent;}
.wizard .changedLocally {background:#80ff80;}
.wizard .changedServer {background:#8080ff;}
.wizard .changedBoth {background:#ff8080;}
.wizard .notFound {background:#ffff80;}
.wizard .putToServer {background:#ff80ff;}
.wizard .gotFromServer {background:#80ffff;}

#messageArea {border:1px solid [[ColorPalette::SecondaryMid]]; background:[[ColorPalette::SecondaryLight]]; color:[[ColorPalette::Foreground]];}
#messageArea .button {color:[[ColorPalette::PrimaryMid]]; background:[[ColorPalette::SecondaryPale]]; border:none;}

.popupTiddler {background:[[ColorPalette::TertiaryPale]]; border:2px solid [[ColorPalette::TertiaryMid]];}

.popup {background:[[ColorPalette::TertiaryPale]]; color:[[ColorPalette::TertiaryDark]]; border-left:1px solid [[ColorPalette::TertiaryMid]]; border-top:1px solid [[ColorPalette::TertiaryMid]]; border-right:2px solid [[ColorPalette::TertiaryDark]]; border-bottom:2px solid [[ColorPalette::TertiaryDark]];}
.popup hr {color:[[ColorPalette::PrimaryDark]]; background:[[ColorPalette::PrimaryDark]]; border-bottom:1px;}
.popup li.disabled {color:[[ColorPalette::TertiaryMid]];}
.popup li a, .popup li a:visited {color:[[ColorPalette::Foreground]]; border: none;}
.popup li a:hover {background:[[ColorPalette::SecondaryLight]]; color:[[ColorPalette::Foreground]]; border: none;}
.popup li a:active {background:[[ColorPalette::SecondaryPale]]; color:[[ColorPalette::Foreground]]; border: none;}
.popupHighlight {background:[[ColorPalette::Background]]; color:[[ColorPalette::Foreground]];}
.listBreak div {border-bottom:1px solid [[ColorPalette::TertiaryDark]];}

.tiddler .defaultCommand {font-weight:bold;}

.shadow .title {color:[[ColorPalette::TertiaryDark]];}

.title {color:[[ColorPalette::SecondaryDark]];}
.subtitle {color:[[ColorPalette::TertiaryDark]];}

.toolbar {color:[[ColorPalette::PrimaryMid]];}
.toolbar a {color:[[ColorPalette::TertiaryLight]];}
.selected .toolbar a {color:[[ColorPalette::TertiaryMid]];}
.selected .toolbar a:hover {color:[[ColorPalette::Foreground]];}

.tagging, .tagged {border:1px solid [[ColorPalette::TertiaryPale]]; background-color:[[ColorPalette::TertiaryPale]];}
.selected .tagging, .selected .tagged {background-color:[[ColorPalette::TertiaryLight]]; border:1px solid [[ColorPalette::TertiaryMid]];}
.tagging .listTitle, .tagged .listTitle {color:[[ColorPalette::PrimaryDark]];}
.tagging .button, .tagged .button {border:none;}

.footer {color:[[ColorPalette::TertiaryLight]];}
.selected .footer {color:[[ColorPalette::TertiaryMid]];}

.sparkline {background:[[ColorPalette::PrimaryPale]]; border:0;}
.sparktick {background:[[ColorPalette::PrimaryDark]];}

.error, .errorButton {color:[[ColorPalette::Foreground]]; background:[[ColorPalette::Error]];}
.warning {color:[[ColorPalette::Foreground]]; background:[[ColorPalette::SecondaryPale]];}
.lowlight {background:[[ColorPalette::TertiaryLight]];}

.zoomer {background:none; color:[[ColorPalette::TertiaryMid]]; border:3px solid [[ColorPalette::TertiaryMid]];}

.imageLink, #displayArea .imageLink {background:transparent;}

.annotation {background:[[ColorPalette::SecondaryLight]]; color:[[ColorPalette::Foreground]]; border:2px solid [[ColorPalette::SecondaryMid]];}

.viewer .listTitle {list-style-type:none; margin-left:-2em;}
.viewer .button {border:1px solid [[ColorPalette::SecondaryMid]];}
.viewer blockquote {border-left:3px solid [[ColorPalette::TertiaryDark]];}

.viewer table, table.twtable {border:2px solid [[ColorPalette::TertiaryDark]];}
.viewer th, .viewer thead td, .twtable th, .twtable thead td {background:[[ColorPalette::SecondaryMid]]; border:1px solid [[ColorPalette::TertiaryDark]]; color:[[ColorPalette::Background]];}
.viewer td, .viewer tr, .twtable td, .twtable tr {border:1px solid [[ColorPalette::TertiaryDark]];}

.viewer pre {border:1px solid [[ColorPalette::SecondaryLight]]; background:[[ColorPalette::SecondaryPale]];}
.viewer code {color:[[ColorPalette::SecondaryDark]];}
.viewer hr {border:0; border-top:dashed 1px [[ColorPalette::TertiaryDark]]; color:[[ColorPalette::TertiaryDark]];}

.highlight, .marked {background:[[ColorPalette::SecondaryLight]];}

.editor input {border:1px solid [[ColorPalette::PrimaryMid]];}
.editor textarea {border:1px solid [[ColorPalette::PrimaryMid]]; width:100%;}
.editorFooter {color:[[ColorPalette::TertiaryMid]];}

/* Backstage: the maintenance bar revealed behind the page. */
#backstageArea {background:[[ColorPalette::Foreground]]; color:[[ColorPalette::TertiaryMid]];}
#backstageArea a {background:[[ColorPalette::Foreground]]; color:[[ColorPalette::Background]]; border:none;}
#backstageArea a:hover {background:[[ColorPalette::SecondaryLight]]; color:[[ColorPalette::Foreground]]; }
#backstageArea a.backstageSelTab {background:[[ColorPalette::Background]]; color:[[ColorPalette::Foreground]];}
#backstageButton a {background:none; color:[[ColorPalette::Background]]; border:none;}
#backstageButton a:hover {background:[[ColorPalette::Foreground]]; color:[[ColorPalette::Background]]; border:none;}
#backstagePanel {background:[[ColorPalette::Background]]; border-color: [[ColorPalette::Background]] [[ColorPalette::TertiaryDark]] [[ColorPalette::TertiaryDark]] [[ColorPalette::TertiaryDark]];}
.backstagePanelFooter .button {border:none; color:[[ColorPalette::Background]];}
.backstagePanelFooter .button:hover {color:[[ColorPalette::Foreground]];}
#backstageCloak {background:[[ColorPalette::Foreground]]; opacity:0.6; filter:'alpha(opacity:60)';}
/*}}}*/
/*{{{*/
body {
	background: [[ColorPalette::Background]];
	color: [[ColorPalette::Foreground]];
}

a{
	color: [[ColorPalette::PrimaryMid]];
}

a:hover{
	background: [[ColorPalette::PrimaryMid]];
	color: [[ColorPalette::Background]];
}

a img{
	border: 0;
}

h1,h2,h3,h4,h5 {
	color: [[ColorPalette::SecondaryDark]];
	background: [[ColorPalette::PrimaryPale]];
}

.button {
	color: [[ColorPalette::PrimaryDark]];
	border: 1px solid [[ColorPalette::Background]];
}

.button:hover {
	color: [[ColorPalette::PrimaryDark]];
	background: [[ColorPalette::SecondaryLight]];
	border-color: [[ColorPalette::SecondaryMid]];
}

.button:active {
	color: [[ColorPalette::Background]];
	background: [[ColorPalette::SecondaryMid]];
	border: 1px solid [[ColorPalette::SecondaryDark]];
}

.header {
	background: [[ColorPalette::PrimaryMid]];
}

.headerShadow {
	color: [[ColorPalette::Foreground]];
}

.headerShadow a {
	font-weight: normal;
	color: [[ColorPalette::Foreground]];
}

.headerForeground {
	color: [[ColorPalette::Background]];
}

.headerForeground a {
	font-weight: normal;
	color: [[ColorPalette::PrimaryPale]];
}

.tabSelected{
	color: [[ColorPalette::PrimaryDark]];
	background: [[ColorPalette::TertiaryPale]];
	border-left: 1px solid [[ColorPalette::TertiaryLight]];
	border-top: 1px solid [[ColorPalette::TertiaryLight]];
	border-right: 1px solid [[ColorPalette::TertiaryLight]];
}

.tabUnselected {
	color: [[ColorPalette::Background]];
	background: [[ColorPalette::TertiaryMid]];
}

.tabContents {
	color: [[ColorPalette::PrimaryDark]];
	background: [[ColorPalette::TertiaryPale]];
	border: 1px solid [[ColorPalette::TertiaryLight]];
}

.tabContents .button {
	 border: 0;}

#sidebar {
}

#sidebarOptions input {
	border: 1px solid [[ColorPalette::PrimaryMid]];
}

#sidebarOptions .sliderPanel {
	background: [[ColorPalette::PrimaryPale]];
}

#sidebarOptions .sliderPanel a {
	border: none;
	color: [[ColorPalette::PrimaryMid]];
}

#sidebarOptions .sliderPanel a:hover {
	color: [[ColorPalette::Background]];
	background: [[ColorPalette::PrimaryMid]];
}

#sidebarOptions .sliderPanel a:active {
	color: [[ColorPalette::PrimaryMid]];
	background: [[ColorPalette::Background]];
}

.wizard {
	background: [[ColorPalette::SecondaryLight]];
	border-top: 1px solid [[ColorPalette::SecondaryMid]];
	border-left: 1px solid [[ColorPalette::SecondaryMid]];
}

.wizard h1 {
	color: [[ColorPalette::SecondaryDark]];
}

.wizard h2 {
	color: [[ColorPalette::Foreground]];
}

.wizardStep {
	background: [[ColorPalette::Background]];
	border-top: 1px solid [[ColorPalette::SecondaryMid]];
	border-bottom: 1px solid [[ColorPalette::SecondaryMid]];
	border-left: 1px solid [[ColorPalette::SecondaryMid]];
}

.wizard .button {
	color: [[ColorPalette::Background]];
	background: [[ColorPalette::PrimaryMid]];
	border-top: 1px solid [[ColorPalette::PrimaryLight]];
	border-right: 1px solid [[ColorPalette::PrimaryDark]];
	border-bottom: 1px solid [[ColorPalette::PrimaryDark]];
	border-left: 1px solid [[ColorPalette::PrimaryLight]];
}

.wizard .button:hover {
	color: [[ColorPalette::PrimaryLight]];
	background: [[ColorPalette::PrimaryDark]];
	border-color: [[ColorPalette::PrimaryLight]];
}

.wizard .button:active {
	color: [[ColorPalette::Background]];
	background: [[ColorPalette::PrimaryMid]];
	border-top: 1px solid [[ColorPalette::PrimaryLight]];
	border-right: 1px solid [[ColorPalette::PrimaryDark]];
	border-bottom: 1px solid [[ColorPalette::PrimaryDark]];
	border-left: 1px solid [[ColorPalette::PrimaryLight]];
}

#messageArea {
	border: 1px solid [[ColorPalette::SecondaryDark]];
	background: [[ColorPalette::SecondaryMid]];
	color: [[ColorPalette::PrimaryDark]];
}

#messageArea .button {
	padding: 0.2em 0.2em 0.2em 0.2em;
	color: [[ColorPalette::PrimaryDark]];
	background: [[ColorPalette::Background]];
}

.popup {
	background: [[ColorPalette::PrimaryLight]];
	border: 1px solid [[ColorPalette::PrimaryMid]];
}

.popup hr {
	color: [[ColorPalette::PrimaryDark]];
	background: [[ColorPalette::PrimaryDark]];
	border-bottom: 1px solid [[ColorPalette::PrimaryDark]];
}

.popup li.disabled {
	color: [[ColorPalette::PrimaryMid]];
}

.popup li a, .popup li a:visited {
	color: [[ColorPalette::TertiaryPale]];
	border: none;
}

.popup li a:hover {
	background: [[ColorPalette::PrimaryDark]];
	color: [[ColorPalette::Background]];
	border: none;
}

.tiddler .defaultCommand {
 font-weight: bold;
}

.shadow .title {
	color: [[ColorPalette::TertiaryDark]];
}

.title {
	color: [[ColorPalette::SecondaryDark]];
}

.subtitle {
	color: [[ColorPalette::TertiaryDark]];
}

.toolbar {
	color: [[ColorPalette::PrimaryMid]];
}

.tagging, .tagged {
	border: 1px solid [[ColorPalette::TertiaryPale]];
	background-color: [[ColorPalette::TertiaryPale]];
}

.selected .tagging, .selected .tagged {
	background-color: [[ColorPalette::TertiaryLight]];
	border: 1px solid [[ColorPalette::TertiaryMid]];
}

.tagging .listTitle, .tagged .listTitle {
	color: [[ColorPalette::PrimaryDark]];
}

.tagging .button, .tagged .button {
		border: none;
}

.footer {
	color: [[ColorPalette::TertiaryLight]];
}

.selected .footer {
	color: [[ColorPalette::TertiaryMid]];
}

.sparkline {
	background: [[ColorPalette::PrimaryPale]];
	border: 0;
}

.sparktick {
	background: [[ColorPalette::PrimaryDark]];
}

.error, .errorButton {
	color: [[ColorPalette::Foreground]];
	background: [[ColorPalette::Error]];
}

.warning {
	color: [[ColorPalette::Foreground]];
	background: [[ColorPalette::SecondaryPale]];
}

.cascade {
	background: [[ColorPalette::TertiaryPale]];
	color: [[ColorPalette::TertiaryMid]];
	border: 1px solid [[ColorPalette::TertiaryMid]];
}

.imageLink, #displayArea .imageLink {
	background: transparent;
}

.viewer .listTitle {list-style-type: none; margin-left: -2em;}

.viewer .button {
	border: 1px solid [[ColorPalette::SecondaryMid]];
}

.viewer blockquote {
	border-left: 3px solid [[ColorPalette::TertiaryDark]];
}

.viewer table {
	border: 2px solid [[ColorPalette::TertiaryDark]];
}

.viewer th, thead td {
	background: [[ColorPalette::SecondaryMid]];
	border: 1px solid [[ColorPalette::TertiaryDark]];
	color: [[ColorPalette::Background]];
}

.viewer td, .viewer tr {
	border: 1px solid [[ColorPalette::TertiaryDark]];
}

.viewer pre {
	border: 1px solid [[ColorPalette::SecondaryLight]];
	background: [[ColorPalette::SecondaryPale]];
}

.viewer code {
	color: [[ColorPalette::SecondaryDark]];
}

.viewer hr {
	border: 0;
	border-top: dashed 1px [[ColorPalette::TertiaryDark]];
	color: [[ColorPalette::TertiaryDark]];
}

.highlight, .marked {
	background: [[ColorPalette::SecondaryLight]];
}

.editor input {
	border: 1px solid [[ColorPalette::PrimaryMid]];
}

.editor textarea {
	border: 1px solid [[ColorPalette::PrimaryMid]];
	width: 100%;
}

.editorFooter {
	color: [[ColorPalette::TertiaryMid]];
}

/*}}}*/
/*{{{*/
* html .tiddler {height:1%;}

body {font-size:.75em; font-family:arial,helvetica; margin:0; padding:0;}

h1,h2,h3,h4,h5,h6 {font-weight:bold; text-decoration:none;}
h1,h2,h3 {padding-bottom:1px; margin-top:1.2em;margin-bottom:0.3em;}
h4,h5,h6 {margin-top:0em;margin-bottom:0em;}
h1 {font-size:1.35em;}
h2 {font-size:1.25em;}
h3 {font-size:1.1em;}
h4 {font-size:1em;}
h5 {font-size:.9em;}

hr {height:1px;}

a {text-decoration:none;}

dt {font-weight:bold;}

ol {list-style-type:decimal;}
ol ol {list-style-type:lower-alpha;}
ol ol ol {list-style-type:lower-roman;}
ol ol ol ol {list-style-type:decimal;}
ol ol ol ol ol {list-style-type:lower-alpha;}
ol ol ol ol ol ol {list-style-type:lower-roman;}
ol ol ol ol ol ol ol {list-style-type:decimal;}

.txtOptionInput {width:11em;}

#contentWrapper .chkOptionInput {border:0;}

.externalLink {text-decoration:underline;}

.indent {margin-left:3em;}
.outdent {margin-left:3em; text-indent:-3em;}
code.escaped {white-space:nowrap;}

.tiddlyLinkExisting {font-weight:bold;}
.tiddlyLinkNonExisting {font-style:italic;}

/* the 'a' is required for IE, otherwise it renders the whole tiddler in bold */
a.tiddlyLinkNonExisting.shadow {font-weight:bold;}

#mainMenu .tiddlyLinkExisting,
	#mainMenu .tiddlyLinkNonExisting,
	#sidebarTabs .tiddlyLinkNonExisting {font-weight:normal; font-style:normal;}
#sidebarTabs .tiddlyLinkExisting {font-weight:bold; font-style:normal;}

.header {position:relative;}
.header a:hover {background:transparent;}
.headerShadow {position:relative; padding:4.5em 0em 1em 1em; left:-1px; top:-1px;}
.headerForeground {position:absolute; padding:4.5em 0em 1em 1em; left:0px; top:0px;}

.siteTitle {font-size:3em;}
.siteSubtitle {font-size:1.2em;}

#mainMenu {position:absolute; left:0; width:10em; text-align:right; line-height:1.6em; padding:1.5em 0.5em 0.5em 0.5em; font-size:1.1em;}

#sidebar {position:absolute; right:3px; width:16em; font-size:.9em;}
#sidebarOptions {padding-top:0.3em;}
#sidebarOptions a {margin:0em 0.2em; padding:0.2em 0.3em; display:block;}
#sidebarOptions input {margin:0.4em 0.5em;}
#sidebarOptions .sliderPanel {margin-left:1em; padding:0.5em; font-size:.85em;}
#sidebarOptions .sliderPanel a {font-weight:bold; display:inline; padding:0;}
#sidebarOptions .sliderPanel input {margin:0 0 .3em 0;}
#sidebarTabs .tabContents {width:15em; overflow:hidden;}

.wizard {padding:0.1em 1em 0em 2em;}
.wizard h1 {font-size:2em; font-weight:bold; background:none; padding:0em 0em 0em 0em; margin:0.4em 0em 0.2em 0em;}
.wizard h2 {font-size:1.2em; font-weight:bold; background:none; padding:0em 0em 0em 0em; margin:0.4em 0em 0.2em 0em;}
.wizardStep {padding:1em 1em 1em 1em;}
.wizard .button {margin:0.5em 0em 0em 0em; font-size:1.2em;}
.wizardFooter {padding:0.8em 0.4em 0.8em 0em;}
.wizardFooter .status {padding:0em 0.4em 0em 0.4em; margin-left:1em;}
.wizard .button {padding:0.1em 0.2em 0.1em 0.2em;}

#messageArea {position:fixed; top:2em; right:0em; margin:0.5em; padding:0.5em; z-index:2000; _position:absolute;}
.messageToolbar {display:block; text-align:right; padding:0.2em 0.2em 0.2em 0.2em;}
#messageArea a {text-decoration:underline;}

.tiddlerPopupButton {padding:0.2em 0.2em 0.2em 0.2em;}
.popupTiddler {position: absolute; z-index:300; padding:1em 1em 1em 1em; margin:0;}

.popup {position:absolute; z-index:300; font-size:.9em; padding:0; list-style:none; margin:0;}
.popup .popupMessage {padding:0.4em;}
.popup hr {display:block; height:1px; width:auto; padding:0; margin:0.2em 0em;}
.popup li.disabled {padding:0.4em;}
.popup li a {display:block; padding:0.4em; font-weight:normal; cursor:pointer;}
.listBreak {font-size:1px; line-height:1px;}
.listBreak div {margin:2px 0;}

.tabset {padding:1em 0em 0em 0.5em;}
.tab {margin:0em 0em 0em 0.25em; padding:2px;}
.tabContents {padding:0.5em;}
.tabContents ul, .tabContents ol {margin:0; padding:0;}
.txtMainTab .tabContents li {list-style:none;}
.tabContents li.listLink { margin-left:.75em;}

#contentWrapper {display:block;}
#splashScreen {display:none;}

#displayArea {margin:1em 17em 0em 14em;}

.toolbar {text-align:right; font-size:.9em;}

.tiddler {padding:1em 1em 0em 1em;}

.missing .viewer,.missing .title {font-style:italic;}

.title {font-size:1.6em; font-weight:bold;}

.missing .subtitle {display:none;}
.subtitle {font-size:1.1em;}

.tiddler .button {padding:0.2em 0.4em;}

.tagging {margin:0.5em 0.5em 0.5em 0; float:left; display:none;}
.isTag .tagging {display:block;}
.tagged {margin:0.5em; float:right;}
.tagging, .tagged {font-size:0.9em; padding:0.25em;}
.tagging ul, .tagged ul {list-style:none; margin:0.25em; padding:0;}
.tagClear {clear:both;}

.footer {font-size:.9em;}
.footer li {display:inline;}

.annotation {padding:0.5em; margin:0.5em;}

* html .viewer pre {width:99%; padding:0 0 1em 0;}
.viewer {line-height:1.4em; padding-top:0.5em;}
.viewer .button {margin:0em 0.25em; padding:0em 0.25em;}
.viewer blockquote {line-height:1.5em; padding-left:0.8em;margin-left:2.5em;}
.viewer ul, .viewer ol {margin-left:0.5em; padding-left:1.5em;}

.viewer table, table.twtable {border-collapse:collapse; margin:0.8em 1.0em;}
.viewer th, .viewer td, .viewer tr,.viewer caption,.twtable th, .twtable td, .twtable tr,.twtable caption {padding:3px;}
table.listView {font-size:0.85em; margin:0.8em 1.0em;}
table.listView th, table.listView td, table.listView tr {padding:0px 3px 0px 3px;}

.viewer pre {padding:0.5em; margin-left:0.5em; font-size:1.2em; line-height:1.4em; overflow:auto;}
.viewer code {font-size:1.2em; line-height:1.4em;}

.editor {font-size:1.1em;}
.editor input, .editor textarea {display:block; width:100%; font:inherit;}
.editorFooter {padding:0.25em 0em; font-size:.9em;}
.editorFooter .button {padding-top:0px; padding-bottom:0px;}

.fieldsetFix {border:0; padding:0; margin:1px 0px 1px 0px;}

.sparkline {line-height:1em;}
.sparktick {outline:0;}

.zoomer {font-size:1.1em; position:absolute; overflow:hidden;}
.zoomer div {padding:1em;}

* html #backstage {width:99%;}
* html #backstageArea {width:99%;}
#backstageArea {display:none; position:relative; overflow: hidden; z-index:150; padding:0.3em 0.5em 0.3em 0.5em;}
#backstageToolbar {position:relative;}
#backstageArea a {font-weight:bold; margin-left:0.5em; padding:0.3em 0.5em 0.3em 0.5em;}
#backstageButton {display:none; position:absolute; z-index:175; top:0em; right:0em;}
#backstageButton a {padding:0.1em 0.4em 0.1em 0.4em; margin:0.1em 0.1em 0.1em 0.1em;}
#backstage {position:relative; width:100%; z-index:50;}
#backstagePanel {display:none; z-index:100; position:absolute; width:90%; margin:0em 3em 0em 3em; padding:1em 1em 1em 1em;}
.backstagePanelFooter {padding-top:0.2em; float:right;}
.backstagePanelFooter a {padding:0.2em 0.4em 0.2em 0.4em;}
#backstageCloak {display:none; z-index:20; position:absolute; width:100%; height:100px;}

.whenBackstage {display:none;}
.backstageVisible .whenBackstage {display:block;}
/*}}}*/
/***
!Sections in this Tiddler:
*Generic rules
**Links styles
**Link Exceptions
*Header
*Main menu
*Sidebar
**Sidebar options
**Sidebar tabs
*Message area
*Popup
*Tabs
*Tiddler display
**Viewer
**Editor
*Misc. rules
!Generic Rules /%==============================================%/
***/
/*{{{*/
body {
	font-size: .75em;
	font-family: arial,helvetica;
	position: relative;
	margin: 0;
	padding: 0;
}

h1,h2,h3,h4,h5 {
	font-weight: bold;
	text-decoration: none;
	padding-left: 0.4em;
}

h1 {font-size: 1.5em;}
h2 {font-size: 1.25em;}
h3 {font-size: 1.1em;}
h4 {font-size: 1em;}
h5 {font-size: .9em;}

hr {
	height: 1px;
}

a{
	text-decoration: none;
}

ol { list-style-type: decimal }
ol ol { list-style-type: lower-alpha }
ol ol ol { list-style-type: lower-roman }
ol ol ol ol { list-style-type: decimal }
ol ol ol ol ol { list-style-type: lower-alpha }
ol ol ol ol ol ol { list-style-type: lower-roman }
ol ol ol ol ol ol ol { list-style-type: decimal }
/*}}}*/
/***
''General Link Styles'' /%-----------------------------------------------------------------------------%/
***/
/*{{{*/
.externalLink {
	text-decoration: underline;
}

/* the 'a' is required for IE, otherwise it renders the whole tiddler in bold */
a.tiddlyLinkNonExisting.shadow {
	font-weight: bold;
}
/*}}}*/
/***
''Exceptions to common link styles'' /%------------------------------------------------------------------%/
***/
/*{{{*/

#mainMenu .tiddlyLinkExisting,
#mainMenu .tiddlyLinkNonExisting,
#sidebarTabs .tiddlyLinkExisting,
#sidebarTabs .tiddlyLinkNonExisting{
 font-weight: normal;
 font-style: normal;
}

/*}}}*/
/***
!Header /%==================================================%/
***/
/*{{{*/

.header {
		position: relative;
}

.header a:hover {
	background: transparent;
}

.headerShadow {
	position: relative;
	padding: 4.5em 0em 1em 1em;
	left: -1px;
	top: -1px;
}

.headerForeground {
	position: absolute;
	padding: 4.5em 0em 1em 1em;
	left: 0px;
	top: 0px;
}

.siteTitle {
	font-size: 3em;
}

.siteSubtitle {
	font-size: 1.2em;
	padding: 0em 0em 0em 2em;
}

/*}}}*/
/***
!Main menu /%==================================================%/
***/
/*{{{*/
#mainMenu {
	position: absolute;
	left: 0;
	width: 10em;
	text-align: right;
	line-height: 1.6em;
	padding: 1.5em 0.5em 0.5em 0.5em;
	font-size: 1.1em;
}

/*}}}*/
/***
!Sidebar rules /%==================================================%/
***/
/*{{{*/
#sidebar {
	position: absolute;
	right: 3px;
	width: 16em;
	font-size: .9em;
}
/*}}}*/
/***
''Sidebar options'' /%----------------------------------------------------------------------------------%/
***/
/*{{{*/
#sidebarOptions {
	padding-top: 0.3em;
}

#sidebarOptions a {
	margin: 0em 0.2em;
	padding: 0.2em 0.3em;
	display: block;
}

#sidebarOptions input {
	margin: 0.4em 0.5em;
}

#sidebarOptions .sliderPanel {
	margin-left: 1em;
	padding: 0.5em;
	font-size: .85em;
}

#sidebarOptions .sliderPanel a {
	font-weight: bold;
	display: inline;
	padding: 0;
}

#sidebarOptions .sliderPanel input {
	margin: 0 0 .3em 0;
}
/*}}}*/
/***
''Sidebar tabs'' /%-------------------------------------------------------------------------------------%/
***/
/*{{{*/

#sidebarTabs .tabContents {
	width: 15em;
	overflow: hidden;
}

/*}}}*/
/***
!Message area /%==================================================%/
***/
/*{{{*/
#messageArea {
position:absolute; top:0; right:0; margin: 0.5em; padding: 0.5em;
}

*[id='messageArea'] {
position:fixed !important; z-index:99;}

.messageToolbar {
display: block;
text-align: right;
}

#messageArea a{
	text-decoration: underline;
}
/*}}}*/
/***
!Popup /%==================================================%/
***/
/*{{{*/
.popup {
	font-size: .9em;
	padding: 0.2em;
	list-style: none;
	margin: 0;
}

.popup hr {
	display: block;
	height: 1px;
	width: auto;
	padding: 0;
	margin: 0.2em 0em;
}

.popup li.disabled {
	padding: 0.2em;
}

.popup li a{
	display: block;
	padding: 0.2em;
}
/*}}}*/
/***
!Tabs /%==================================================%/
***/
/*{{{*/
.tabset {
	padding: 1em 0em 0em 0.5em;
}

.tab {
	margin: 0em 0em 0em 0.25em;
	padding: 2px;
}

.tabContents {
	padding: 0.5em;
}

.tabContents ul, .tabContents ol {
	margin: 0;
	padding: 0;
}

.txtMainTab .tabContents li {
	list-style: none;
}

.tabContents li.listLink {
	 margin-left: .75em;
}
/*}}}*/
/***
!Tiddler display rules /%==================================================%/
***/
/*{{{*/
#displayArea {
	margin: 1em 17em 0em 14em;
}


.toolbar {
	text-align: right;
	font-size: .9em;
	visibility: hidden;
}

.selected .toolbar {
	visibility: visible;
}

.tiddler {
	padding: 1em 1em 0em 1em;
}

.missing .viewer,.missing .title {
	font-style: italic;
}

.title {
	font-size: 1.6em;
	font-weight: bold;
}

.missing .subtitle {
 display: none;
}

.subtitle {
	font-size: 0.8em;
}

/* I'm not a fan of how button looks in tiddlers... */
.tiddler .button {
	padding: 0.2em 0.4em;
}

.tagging {
margin: 0.5em 0.5em 0.5em 0;
float: left;
display: none;
}

.isTag .tagging {
display: block;
}

.tagged {
margin: 0.5em;
float: right;
}

.tagging, .tagged {
font-size: 0.9em;
padding: 0.25em;
}

.tagging ul, .tagged ul {
list-style: none;margin: 0.25em;
padding: 0;
}

.tagClear {
clear: both;
}

.footer {
	font-size: .9em;
}

.footer li {
display: inline;
}
/***
''The viewer is where the tiddler content is displayed'' /%------------------------------------------------%/
***/
/*{{{*/
* html .viewer pre {
	width: 99%;
	padding: 0 0 1em 0;
}

.viewer {
	line-height: 1.4em;
	padding-top: 0.5em;
}

.viewer .button {
	margin: 0em 0.25em;
	padding: 0em 0.25em;
}

.viewer blockquote {
	line-height: 1.5em;
	padding-left: 0.8em;
	margin-left: 2.5em;
}

.viewer ul, .viewer ol{
	margin-left: 0.5em;
	padding-left: 1.5em;
}

.viewer table {
	border-collapse: collapse;
	margin: 0.8em 1.0em;
}

.viewer th, .viewer td, .viewer tr,.viewer caption{
	padding: 3px;
}

.viewer pre {
	padding: 0.5em;
	margin-left: 0.5em;
	font-size: 1.2em;
	line-height: 1.4em;
	overflow: auto;
}

.viewer code {
	font-size: 1.2em;
	line-height: 1.4em;
}
/*}}}*/
/***
''The editor replaces the viewer in the tiddler'' /%------------------------------------------------%/
***/
/*{{{*/
.editor {
font-size: 1.1em;
}

.editor input, .editor textarea {
	display: block;
	width: 100%;
	font: inherit;
}

.editorFooter {
	padding: 0.25em 0em;
	font-size: .9em;
}

.editorFooter .button {
padding-top: 0px; padding-bottom: 0px;}

.fieldsetFix {border: 0;
padding: 0;
margin: 1px 0px 1px 0px;
}
/*}}}*/
/***
!Misc rules /%==================================================%/
***/
/*{{{*/
.sparkline {
	line-height: 1em;
}

.sparktick {
	outline: 0;
}

.zoomer {
	font-size: 1.1em;
	position: absolute;
	padding: 1em;
}

.cascade {
	font-size: 1.1em;
	position: absolute;
	overflow: hidden;
}
/*}}}*/
/*{{{*/
@media print {
#mainMenu, #sidebar, #messageArea, .toolbar, #backstageButton, #backstageArea, #toolbar, #topMenu, #rightMenu {display: none !important;}
#header, #headerShadow {display: none !important;}
.siteSubtitle {display: none !important;}

.siteTitle { font-size: 1.5em; }


#displayArea {margin: 1em 1em 0em;}
noscript {display:none;} /* Fixes a feature in Firefox 1.5.0.2 where print preview displays the noscript content */
}
/*}}}*/
Type the text for 'Styles'
<<tabs txtMainTab Timeline Timeline TabTimeline All 'All tiddlers' TabAll Tags 'All tags' TabTags More 'More lists' TabMore>>
The following command table should prove useful for this course.  This is not an extensive list of commands you will need to know / become familiar with.

| !&nbsp;Cover&nbsp; | !&nbsp;Command&nbsp; | !Description |
|>|>|bgcolor(#a0ffa0): ''Basic Commands'' |
| x | echo |output command arguments to the terminal|
| x | cd |change directories|
| x | pwd |display current working directory|
| x | ls |list files|
| x | cp |copy files|
| x | rm |remove files|
| x | mv |move files|
| x | mkdir |create directory|
| x | rmdir |remove directory|
| x | touch |create an empty file with default permissions|
| x | ln |create link|
| x | man |view man pages|
| x | chmod |set permissions for a file|
|  | chgrp |set group for a file|
|>|>|bgcolor(#a0ffa0): ''Display Text / Editors'' |
| x | less |display text output one page at a time|
|  | pico |easy to use text editor|
|  | nano |GNU clone of pico|
| x | vi |advanced unix text editor|
|  | ex |line oriented version of vi|
|  | vim |vi improved|
| x | vimtutor |learn how to use the vim editor|
|>|>|bgcolor(#a0ffa0): ''Filters'' |
| x | cat |concatenate and print files|
| x | grep |pattern matching filter|
| x | egrep |extended regular expression pattern matching filter|
| x | head |display first lines of a file|
| x | tail |display the last part of a file|
| x | cut |cut out selected portions of each line of a file|
| x | fold |fold long lines for finite width output device|
| x | sort |sort lines of text files|
| x | uniq |report or filter out repeated lines in a file|
| x | wc |word, line, character, and byte count|
| x | tr |translate characters|
|  | paste |merge lines of input|
|  | nl |line numbering filter|
| x | sed |stream editor|
| x | awk |pattern-directed scanning and processing language|
| x | tee |duplicate standard input to a file|
|  | strings |print the strings of printable characters in (binary) files|
|  | cmp |compare two files|
|  | diff |compare files line by line|
|  | comm |select or reject lines common to two files|
|>|>|bgcolor(#a0ffa0): ''System Commands'' |
| x | script |save copy of terminal session|
|  | source |read a .file|
| x | rehash |recompute hash table of where commands are located|
| x | which |scan path for a program and return its location (or definition of an alias)|
| x | df |display free disk space|
| x | du |disk usage (-s display only a grand total, -k use 1K blocks, -h human readable sizes)|
| x | find |walk a file hierarchy in search of files|
|  | locate |find filenames quickly based on pre-generated file database|
|  | hostname |print name of current host system|
| x | uptime |show how long system has been running|
| x | uname |display information about the system|
|  | xargs |construct argument list(s) and execute utility|
|  | quota |display disk usage and limits|
|  | crontab |schedule commands for automated execution on regular intervals|
|  | at |schedule a job for later execution|
|>|>|bgcolor(#a0ffa0): ''Process Management / Job Control'' |
| x | ps |process status|
| x | top |display and update information about the top cpu processes|
| x | kill |terminate or signal a process|
| x | jobs |display all jobs|
| x | fg |continue background jobs in the foreground|
| x | bg |continue suspended job in the background|
| x | stop |suspend job running in the background|
|  | suspend |suspend the current running shell|
|>|>|bgcolor(#a0ffa0): ''User Information'' |
| x | w |display who is logged in and what they are doing|
| x | id |return user identity|
| x | groups |show group memberships|
|  | users |list usernames of current logged in users|
|  | who |display who is on the system|
|  | whoami |display effective user id|
| x | finger |user information lookup program|
| x | last |indicate last logins of users and ttys|
|>|>|bgcolor(#a0ffa0): ''Misc commands useful for shell scripting'' |
| x | clear |clear the screen|
| x | read //var// |prompt the user to enter information, saving to //var//|
| x | date |display the current date and time with optional formatting.  see strftime manpage|
| x | test |condition evaluation utility.  Linked to the bracket command {{{[}}}.  See test manpage.|
| x | expr |evaluate an expression|
| x | jot |print sequential or random numbers|
|  | sleep //n// |pause execution for //n// seconds|
|  | stat |display extended file status/information|
|  | stty |set the options for a terminal device interface|
|  | basename |return the file name portion of a path|
|  | dirname |return the directory name portion of a path|
|  | fstat |List open files or determine whether specified file is open|
| x | exit [//n//] |log out or quit a script with the option exit status of //n//|
|>|>|bgcolor(#a0ffa0): ''Networking / Communication'' |
| x | ssh |~OpenSSH SSH client|
| x | scp |secure copy (remote file copy program)|
|  | rsync |a fast, versatile, remote (and local) file-copying tool|
|  | telnet |user interface to the TELNET protocol.  also useful for testing connectivity to arbitrary ports|
|  | talk / ytalk |talk to another user|
|  | write |send a message to another user|
|  | mesg |display (do not display) messages from other users|
|  | host |DNS lookup utility|
|  | nslookup |query Internet name servers interactively|
|  | traceroute |print the route packets take to network host|
|  | ping |send ICMP ~ECHO_REQUEST packets to network hosts|
|  | lynx / links |character mode WWW browser|
|>|>|bgcolor(#a0ffa0): ''Text Formatting & Printing'' |
| x | lpr |command line print utility|
| x | lpq |print spool queue examination program|
| x | lprm |remove jobs from the line printer spooling queue|
| x | pdf2ps |Ghostscript PDF to ~PostScript translator|
| x | a2ps |format files for printing on a ~PostScript printer|
|>|>|bgcolor(#a0ffa0): ''Working with files'' |
| x | file |display file type|
| x | tar |manipulate file archive files|
| x | gzip |compression tool using ~Lempel-Ziv coding|
| x | gunzip |decompression tool using ~Lempel-Ziv coding|
| x | bzip2 |a block-sorting file compressor|
| x | bunzip2 |a block-sorting file decompressor|
|  | split |split a file into pieces|
| x | md5 / md5sum |calculate a message-digest fingerprint (checksum) for a file (freebsd / linux)|
|  | srm |securely remove files or directories|
|  | rsync |a fast, versatile, remote (and local) file-copying tool|
/***
|Name|TagglyListPlugin|
|Created by|SimonBaird|
|Location|http://simonbaird.com/mptw/#TagglyListPlugin|
|Version|1.1.2 25-Apr-06|
|Requires|See TagglyTagging|

!History
* 1.1.2 (25-Apr-2006) embedded TagglyTaggingStyles. No longer need separated tiddler for styles.
* 1.1.1 (6-Mar-2006) fixed bug with refreshAllVisible closing tiddlers being edited. Thanks Luke Blanshard.

***/

/***
!Setup and config
***/
//{{{

// Plugin version metadata, read by TiddlyWiki's plugin manager.
version.extensions.TagglyListPlugin = {
	major: 1, minor: 1, revision: 2,
	// JS Date months are 0-based, so 3 = April. This matches the
	// "25-Apr-06" version note above; the previous value 4 meant 25-May.
	date: new Date(2006,3,25),
	source: "http://simonbaird.com/mptw/#TagglyListPlugin"
};

// Namespaces for the macros this plugin registers with TiddlyWiki.
config.macros.tagglyList = {};
config.macros.tagglyListByTag = {};
config.macros.tagglyListControl = {};
config.macros.tagglyListWithSort = {};
config.macros.hideSomeTags = {};

// change this to your preference
// Upper bound scanned by getNumCols when looking for "listNCols" tags.
config.macros.tagglyListWithSort.maxCols = 6;

// %0 is substituted with the tag name when the label is rendered.
config.macros.tagglyList.label = "Tagged as %0:";

// the default sort options. set these to your preference
// Used as fallbacks by getSortBy/getSortOrder/getHideState/getGroupState/getNumCols
// when a tiddler carries none of the control tags below.
config.macros.tagglyListWithSort.defaults = {
 sortBy:"title", // title|created|modified
 sortOrder: "asc", // asc|desc
 hideState: "show", // show|hide
 groupState: "nogroup", // nogroup|group
 numCols: 1
};

// these tags will be ignored by the grouped view
config.macros.tagglyListByTag.excludeTheseTags = [
 "systemConfig",
 "TiddlerTemplates"
];

// Maps each logical control state to the literal tag that selects it.
// The get* helpers below look these tags up on a tiddler to decide
// sort field/order, hide/show, grouping and column count.
config.macros.tagglyListControl.tags = {
 title:"sortByTitle",
 modified: "sortByModified",
 created: "sortByCreated",
 asc:"sortAsc",
 desc:"sortDesc",
 hide:"hideTagged",
 show:"showTagged",
 nogroup:"noGroupByTag",
 group:"groupByTag",
 cols1:"list1Cols",
 cols2:"list2Cols",
 cols3:"list3Cols",
 cols4:"list4Cols",
 cols5:"list5Cols",
 cols6:"list6Cols",
 cols7:"list7Cols",
 cols8:"list8Cols",
 cols9:"list9Cols"
}

// note: should match config.macros.tagglyListControl.tags
config.macros.hideSomeTags.tagsToHide = [
 "sortByTitle",
 "sortByCreated",
 "sortByModified",
 "sortDesc",
 "sortAsc",
 "hideTagged",
 "showTagged",
 "noGroupByTag",
 "groupByTag",
 "list1Cols",
 "list2Cols",
 "list3Cols",
 "list4Cols",
 "list5Cols",
 "list6Cols",
 "list7Cols",
 "list8Cols",
 "list9Cols"
];


//}}}
/***

!Utils
***/
//{{{
// from Eric
// from Eric
// Returns true if the tiddler named `title` exists and carries `tag`.
// NOTE(review): relies on TiddlyWiki's Array `find` extension, which
// appears to return an index or null (not the ES6 Array.prototype.find
// element-returning form) — confirm against the core's extensions.
function isTagged(title,tag) {
 var t=store.getTiddler(title); if (!t) return false;
 return (t.tags.find(tag)!=null);
}

// from Eric
// from Eric
// Adds `tag` to the tiddler's tags if absent, otherwise removes it.
// No-op when the tiddler is missing or has no tags array.
// Note: changes the in-memory tiddler only; no store-change notification
// is issued here.
function toggleTag(title,tag) {
 var t=store.getTiddler(title); if (!t || !t.tags) return;
 if (t.tags.find(tag)==null) t.tags.push(tag);
 else t.tags.splice(t.tags.find(tag),1);
}

// Appends `tag` to the tiddler's tags. Does not check for duplicates,
// so calling it twice adds the tag twice. No-op when the tiddler is
// missing or has no tags array.
function addTag(title,tag) {
 var t=store.getTiddler(title); if (!t || !t.tags) return;
 t.tags.push(tag);
}

// Removes `tag` from the tiddler's tags, if present. No-op when the
// tiddler is missing, has no tags array, or does not carry the tag.
function removeTag(title,tag) {
 var t=store.getTiddler(title); if (!t || !t.tags) return;
 // Cache the index so the tag list is scanned once instead of twice
 // (the original called t.tags.find(tag) in both the test and the splice).
 var idx=t.tags.find(tag);
 if (idx!=null) t.tags.splice(idx,1);
}

// from Udo
// from Udo
// Linear-scan indexOf shim (loose == comparison), returning the first
// matching index or -1. Guarded so it only installs where the runtime
// lacks a native Array.prototype.indexOf — unconditionally overwriting
// the native method (as the original did) both loses the faster native
// implementation and creates an enumerable property that leaks into
// for-in loops over arrays.
if (!Array.prototype.indexOf) {
 Array.prototype.indexOf = function(item) {
  for (var i = 0; i < this.length; i++) {
   if (this[i] == item) {
    return i;
   }
  }
  return -1;
 };
}
// True if `item` occurs in the array (delegates to indexOf, so it uses
// the same loose == comparison as the shim above when that is active).
// The trailing semicolon is required: this is a function-expression
// assignment, and the original relied on automatic semicolon insertion.
Array.prototype.contains = function(item) {
 return (this.indexOf(item) >= 0);
};
//}}}
/***

!tagglyList
displays a list of tagged tiddlers.
parameters are sortField and sortOrder
***/
//{{{

// not used at the moment...
function sortedListOfOtherTags(tiddler,thisTag) {
 var list = tiddler.tags.concat(); // so we are working on a clone..
 for (var i=0;i<config.macros.hideSomeTags.tagsToHide.length;i++) {
 if (list.find(config.macros.hideSomeTags.tagsToHide[i]) != null)
 list.splice(list.find(config.macros.hideSomeTags.tagsToHide[i]),1); // remove hidden ones
 }
 for (var i=0;i<config.macros.tagglyListByTag.excludeTheseTags.length;i++) {
 if (list.find(config.macros.tagglyListByTag.excludeTheseTags[i]) != null)
 list.splice(list.find(config.macros.tagglyListByTag.excludeTheseTags[i]),1); // remove excluded ones
 }
 list.splice(list.find(thisTag),1); // remove thisTag
 return '[[' + list.sort().join("]] [[") + ']]';
}

// Three-way comparator for Array.sort: 0 when equal, -1 when a sorts
// before b, +1 otherwise (works for any <-comparable values).
function sortHelper(a,b) {
 return (a == b) ? 0 : ((a < b) ? -1 : +1);
}

// Macro handler: renders the tiddlers tagged with the current tiddler's
// title as a wiki list, grouped by their other (non-hidden, non-excluded)
// tags. params[0] = sort field (default "title"), params[1] = sort order
// (default "asc"). Tiddlers whose every tag is hidden/excluded end up in
// `leftOvers` and are emitted ungrouped (first for asc, last for desc).
// NOTE(review): uses TiddlyWiki's Array `find` (index-or-null) extension.
config.macros.tagglyListByTag.handler = function (place,macroName,params,wikifier,paramString,tiddler) {

 var sortBy = params[0] ? params[0] : "title";
 var sortOrder = params[1] ? params[1] : "asc";

 var result = store.getTaggedTiddlers(tiddler.title,sortBy);

 if (sortOrder == "desc")
 result = result.reverse();

 // Start with every tagged tiddler; grouped ones are removed below.
 var leftOvers = []
 for (var i=0;i<result.length;i++) {
 leftOvers.push(result[i].title);
 }

 // Map: tag name -> wiki markup of second-level list items ("**[[title]]").
 var allTagsHolder = {};
 for (var i=0;i<result.length;i++) {
 for (var j=0;j<result[i].tags.length;j++) {

 if (
 result[i].tags[j] != tiddler.title // not this tiddler
 && config.macros.hideSomeTags.tagsToHide.find(result[i].tags[j]) == null // not a hidden one
 && config.macros.tagglyListByTag.excludeTheseTags.find(result[i].tags[j]) == null // not excluded
 ) {
 if (!allTagsHolder[result[i].tags[j]])
 allTagsHolder[result[i].tags[j]] = "";
 allTagsHolder[result[i].tags[j]] += "**[["+result[i].title+"]]\n";

 if (leftOvers.find(result[i].title) != null)
 leftOvers.splice(leftOvers.find(result[i].title),1); // remove from leftovers. at the end it will contain the leftovers...
 }
 }
 }


 var allTags = [];
 for (var t in allTagsHolder)
 allTags.push(t);

 // Sort group headings: by name for "title", otherwise by the named
 // field of the tag's own tiddler (missing tiddlers sort first).
 allTags.sort(function(a,b) {
 var tidA = store.getTiddler(a);
 var tidB = store.getTiddler(b);
 if (sortBy == "title") return sortHelper(a,b);
 else if (!tidA && !tidB) return 0;
 else if (!tidA) return -1;
 else if (!tidB) return +1;
 else return sortHelper(tidA[sortBy],tidB[sortBy]);
 });

 var markup = "";

 if (sortOrder == "desc") {
 allTags.reverse();
 }
 else {
 // leftovers first...
 for (var i=0;i<leftOvers.length;i++)
 markup += "*[["+leftOvers[i]+"]]\n";
 }

 for (var i=0;i<allTags.length;i++)
 markup += "*[["+allTags[i]+"]]\n" + allTagsHolder[allTags[i]];

 if (sortOrder == "desc") {
 // leftovers last...
 for (var i=0;i<leftOvers.length;i++)
 markup += "*[["+leftOvers[i]+"]]\n";
 }

 wikify(markup,place);
}

// Macro handler: renders the tiddlers tagged with the current tiddler's
// title as a wiki list split across an HTML table of numCols columns.
// params[0] = sort field (default "title"), params[1] = sort order
// (default "asc"), params[2] = column count (default 1). When the list
// doesn't divide evenly, the first (listSize % numCols) columns get one
// extra row.
config.macros.tagglyList.handler = function (place,macroName,params,wikifier,paramString,tiddler) {
 var sortBy = params[0] ? params[0] : "title";
 var sortOrder = params[1] ? params[1] : "asc";
 var numCols = params[2] ? params[2] : 1;

 var result = store.getTaggedTiddlers(tiddler.title,sortBy);
 if (sortOrder == "desc")
 result = result.reverse();

 var listSize = result.length;
 var colSize = listSize/numCols;
 var remainder = listSize % numCols;

 var upperColsize;
 var lowerColsize;
 if (colSize != Math.floor(colSize)) {
 // it's not an exact fit so..
 lowerColsize = Math.floor(colSize);
 upperColsize = Math.floor(colSize) + 1;
 }
 else {
 lowerColsize = colSize;
 upperColsize = colSize;
 }

 // (an unused `var markup = "";` was removed here — this handler builds
 // per-column markup in `foo` instead)
 var c=0; // running index into `result` across all columns

 var newTaggedTable = createTiddlyElement(place,"table");
 var newTaggedBody = createTiddlyElement(newTaggedTable,"tbody");
 var newTaggedTr = createTiddlyElement(newTaggedBody,"tr");

 for (var j=0;j<numCols;j++) {
 var foo = "";
 var thisSize;

 if (j<remainder)
 thisSize = upperColsize;
 else
 thisSize = lowerColsize;

 for (var i=0;i<thisSize;i++)
 foo += ( "*[[" + result[c++].title + "]]\n"); // was using splitList.shift() but didn't work in IE;

 var newTd = createTiddlyElement(newTaggedTr,"td",null,"tagglyTagging");
 wikify(foo,newTd);

 }

};

/* snip for later.....
 //var groupBy = params[3] ? params[3] : "t.title.substr(0,1)";
 //var groupBy = params[3] ? params[3] : "sortedListOfOtherTags(t,tiddler.title)";
 //var groupBy = params[3] ? params[3] : "t.modified";
 var groupBy = null; // for now. groupBy here is working but disabled for now.

 var prevGroup = "";
 var thisGroup = "";

 if (groupBy) {
 result.sort(function(a,b) {
 var t = a; var aSortVal = eval(groupBy); var aSortVal2 = eval("t".sortBy);
 var t = b; var bSortVal = eval(groupBy); var bSortVal2 = eval("t".sortBy);
 var t = b; var bSortVal2 = eval(groupBy);
 return (aSortVal == bSortVal ?
 (aSortVal2 == bSortVal2 ? 0 : (aSortVal2 < bSortVal2 ? -1 : +1)) // yuck
 : (aSortVal < bSortVal ? -1 : +1));
 });
 }

 if (groupBy) {
 thisGroup = eval(groupBy);
 if (thisGroup != prevGroup)
 markup += "*[["+thisGroup+']]\n';
 markup += "**[["+t.title+']]\n';
 prevGroup = thisGroup;
 }



*/


//}}}

/***

!tagglyListControl
Use to make the sort control buttons
***/
//{{{

function getSortBy(title) {
	// Determine the sort field for a taggly list by inspecting the tiddler's
	// control tags; falls back to the configured default when the tiddler
	// doesn't exist or carries no sort-by tag.
	var t = store.getTiddler(title);
	var fallback = config.macros.tagglyListWithSort.defaults.sortBy;
	if (!t)
		return fallback;
	var controlTags = config.macros.tagglyListControl.tags;
	var candidates = ["title","modified","created"];
	for (var i=0; i<candidates.length; i++)
		if (t.tags.contains(controlTags[candidates[i]]))
			return candidates[i];
	return fallback;
}

function getSortOrder(title) {
	// Determine the sort direction ("asc"/"desc") from the tiddler's control
	// tags, defaulting when the tiddler is missing or untagged.
	var t = store.getTiddler(title);
	var fallback = config.macros.tagglyListWithSort.defaults.sortOrder;
	if (!t)
		return fallback;
	var controlTags = config.macros.tagglyListControl.tags;
	var candidates = ["asc","desc"];
	for (var i=0; i<candidates.length; i++)
		if (t.tags.contains(controlTags[candidates[i]]))
			return candidates[i];
	return fallback;
}

function getHideState(title) {
	// Determine whether the taggly list is hidden ("hide") or visible ("show")
	// from the tiddler's control tags, defaulting when neither tag is present.
	var t = store.getTiddler(title);
	var fallback = config.macros.tagglyListWithSort.defaults.hideState;
	if (!t)
		return fallback;
	var controlTags = config.macros.tagglyListControl.tags;
	var candidates = ["hide","show"];
	for (var i=0; i<candidates.length; i++)
		if (t.tags.contains(controlTags[candidates[i]]))
			return candidates[i];
	return fallback;
}

function getGroupState(title) {
	// Determine the grouping mode ("group"/"nogroup") from the tiddler's
	// control tags, defaulting when neither tag is present.
	var t = store.getTiddler(title);
	var fallback = config.macros.tagglyListWithSort.defaults.groupState;
	if (!t)
		return fallback;
	var controlTags = config.macros.tagglyListControl.tags;
	var candidates = ["group","nogroup"];
	for (var i=0; i<candidates.length; i++)
		if (t.tags.contains(controlTags[candidates[i]]))
			return candidates[i];
	return fallback;
}

function getNumCols(title) {
	// Number of columns for the taggly list, encoded as a "colsN" control tag
	// (N from 1 to maxCols). Returns the configured integer default otherwise.
	var t = store.getTiddler(title);
	var fallback = config.macros.tagglyListWithSort.defaults.numCols; // an int
	if (!t)
		return fallback;
	var controlTags = config.macros.tagglyListControl.tags;
	var max = config.macros.tagglyListWithSort.maxCols;
	for (var n=1; n<=max; n++)
		if (t.tags.contains(controlTags["cols"+n]))
			return n;
	return fallback;
}


function getSortLabel(title,which) {
	// Build the label text for one sort/hide/group/cols control button.
	// TODO. the strings here should be definable in config
	switch (which) {
		case "hide":
			return (getHideState(title) == "show" ? "−" : "+"); // 0x25b8;
		case "group":
			return (getGroupState(title) == "group" ? "normal" : "grouped");
		case "cols":
			return "cols±"; // &plusmn;
		default:
			// sort-by buttons: decorate the active field with an order arrow
			if (getSortBy(title) == which)
				return which + (getSortOrder(title) == "asc" ? "↓" : "↑"); // &uarr; &darr;
			return which;
	}
}

function handleSortClick(title,which) {
	// Respond to a click on one of the taggly control buttons ("hide", "group",
	// "cols", or a sort field name) by rewriting the control tags on the
	// tiddler named `title`, then refreshing its display.
	var currentSortBy = getSortBy(title);
	var currentSortOrder = getSortOrder(title);
	var currentHideState = getHideState(title);
	var currentGroupState = getGroupState(title);
	var currentNumCols = getNumCols(title);

	var tags = config.macros.tagglyListControl.tags;

	// if it doesn't exist, lets create it..
	if (!store.getTiddler(title))
		store.saveTiddler(title,title,"",config.options.txtUserName,new Date(),null);

	if (which == "hide") {
		// toggle hide state; only non-default states get tagged at all
		var newHideState = (currentHideState == "hide" ? "show" : "hide");
		removeTag(title,tags[currentHideState]);
		if (newHideState != config.macros.tagglyListWithSort.defaults.hideState)
			toggleTag(title,tags[newHideState]);
	}
	else if (which == "group") {
		// toggle group state
		var newGroupState = (currentGroupState == "group" ? "nogroup" : "group");
		removeTag(title,tags[currentGroupState]);
		if (newGroupState != config.macros.tagglyListWithSort.defaults.groupState)
			toggleTag(title,tags[newGroupState]);
	}
	else if (which == "cols") {
		// cycle the column count, wrapping back to 1 past maxCols or past the
		// number of tiddlers actually in the list
		var newNumCols = currentNumCols + 1; // confusing. currentNumCols is an int
		if (newNumCols > config.macros.tagglyListWithSort.maxCols || newNumCols > store.getTaggedTiddlers(title).length)
			newNumCols = 1;
		removeTag(title,tags["cols"+currentNumCols]);
		// bug fix: compare against the numCols default. This previously compared
		// the string "colsN" against defaults.groupState (copy-paste error), which
		// was always true, so even the default column count got tagged.
		if (newNumCols != config.macros.tagglyListWithSort.defaults.numCols)
			toggleTag(title,tags["cols"+newNumCols]);
	}
	else if (currentSortBy == which) {
		// already sorting by this field: toggle the sort order instead
		var newSortOrder = (currentSortOrder == "asc" ? "desc" : "asc");
		removeTag(title,tags[currentSortOrder]);
		if (newSortOrder != config.macros.tagglyListWithSort.defaults.sortOrder)
			toggleTag(title,tags[newSortOrder]);
	}
	else {
		// change sortBy only
		removeTag(title,tags["title"]);
		removeTag(title,tags["created"]);
		removeTag(title,tags["modified"]);

		if (which != config.macros.tagglyListWithSort.defaults.sortBy)
			toggleTag(title,tags[which]);
	}

	store.setDirty(true); // save is required now.
	story.refreshTiddler(title,false,true); // force=true
}

// Macro handler: render one control button (params[0] names the control, e.g.
// "hide", "group", "cols", "title") inside the current tiddler's taggly list.
config.macros.tagglyListControl.handler = function (place,macroName,params,wikifier,paramString,tiddler) {
 var onclick = function(e) {
 if (!e) var e = window.event;
 handleSortClick(tiddler.title,params[0]);
 // stop the click from bubbling into the tiddler (cancelBubble covers old IE)
 e.cancelBubble = true;
 if (e.stopPropagation) e.stopPropagation();
 return false;
 };
 // the "hide" control gets its own CSS class so it can stay visible when the rest are hidden
 createTiddlyButton(place,getSortLabel(tiddler.title,params[0]),"Click to change sort options",onclick,params[0]=="hide"?"hidebutton":"button");
}
//}}}
/***

!tagglyListWithSort
put it all together..
***/
//{{{
// Macro handler: assemble the full taggly UI (hide button, label, sort/group/
// cols controls, then the list itself) as wikitext and wikify it in place.
// Only renders when the tiddler actually tags something.
config.macros.tagglyListWithSort.handler = function (place,macroName,params,wikifier,paramString,tiddler) {
 if (tiddler && store.getTaggedTiddlers(tiddler.title).length > 0)
  // todo make this readable
 wikify(
 "<<tagglyListControl hide>>"+
 (getHideState(tiddler.title) != "hide" ?
 '<html><span class="tagglyLabel">'+config.macros.tagglyList.label.format([tiddler.title])+' </span></html>'+
 "<<tagglyListControl title>><<tagglyListControl modified>><<tagglyListControl created>><<tagglyListControl group>>"+(getGroupState(tiddler.title)=="group"?"":"<<tagglyListControl cols>>")+"\n" +
 "<<tagglyList" + (getGroupState(tiddler.title)=="group"?"ByTag ":" ") + getSortBy(tiddler.title)+" "+getSortOrder(tiddler.title)+" "+getNumCols(tiddler.title)+">>" // hacky
 // + \n----\n" +
 //"<<tagglyList "+getSortBy(tiddler.title)+" "+getSortOrder(tiddler.title)+">>"
 : ""),
 place,null,tiddler);
}

// tagglyTagging is an alias: it shares tagglyListWithSort's handler verbatim
config.macros.tagglyTagging = { handler: config.macros.tagglyListWithSort.handler };


//}}}
/***

!hideSomeTags
So we don't see the sort tags.
(note, they are still there when you edit. Will that be too annoying?)
***/
//{{{

// based on tags.handler
config.macros.hideSomeTags.handler = function(place,macroName,params,wikifier,paramString,tiddler) {
 var theList = createTiddlyElement(place,"ul");
 if(params[0] && store.tiddlerExists[params[0]])
 tiddler = store.getTiddler(params[0]);
 var lingo = config.views.wikified.tag;
 var prompt = tiddler.tags.length == 0 ? lingo.labelNoTags : lingo.labelTags;
 createTiddlyElement(theList,"li",null,"listTitle",prompt.format([tiddler.title]));
 for(var t=0; t<tiddler.tags.length; t++)
 if (!this.tagsToHide.contains(tiddler.tags[t])) // this is the only difference from tags.handler...
 createTagButton(createTiddlyElement(theList,"li"),tiddler.tags[t],tiddler.title);

}

//}}}
/***

!Refresh everything when we save a tiddler. So the tagged lists never get stale. Is this too slow???
***/
//{{{

function refreshAllVisible() {
	// Redraw every open tiddler that has no unsaved edits (dirty attribute set).
	var redrawIfClean = function(title,element) {
		if (element.getAttribute("dirty") != "true")
			story.refreshTiddler(title,false,true); // force redraw
	};
	story.forEachTiddler(redrawIfClean);
}

// Wrap saveTiddler/removeTiddler so tagged lists could be refreshed on every
// change. The refreshAllVisible() calls are deliberately commented out (too
// slow? — see the header note above); the originals are kept under *_orig_mptw.
story.saveTiddler_orig_mptw = story.saveTiddler;
story.saveTiddler = function(title,minorUpdate) {
 var result = this.saveTiddler_orig_mptw(title,minorUpdate);
// refreshAllVisible();
 return result;
}

store.removeTiddler_orig_mptw = store.removeTiddler;
store.removeTiddler = function(title) {
 this.removeTiddler_orig_mptw(title);
// refreshAllVisible();
}

// Shadow stylesheet for the (old-style) taggly plugin; users opt in by adding
// [[TagglyTaggingStyles]] to their StyleSheet tiddler.
config.shadowTiddlers.TagglyTaggingStyles = "/***\nTo use, add {{{[[TagglyTaggingStyles]]}}} to your StyleSheet tiddler, or you can just paste the CSS in directly. See also ViewTemplate, EditTemplate and TagglyTagging.\n***/\n/*{{{*/\n.tagglyTagged li.listTitle { display:none;}\n.tagglyTagged li { display: inline; font-size:90%; }\n.tagglyTagged ul { margin:0px; padding:0px; }\n.tagglyTagging { padding-top:0.5em; }\n.tagglyTagging li.listTitle { display:none;}\n.tagglyTagging ul { margin-top:0px; padding-top:0.5em; padding-left:2em; margin-bottom:0px; padding-bottom:0px; }\n\n/* .tagglyTagging .tghide { display:inline; } */\n\n.tagglyTagging { vertical-align: top; margin:0px; padding:0px; }\n.tagglyTagging table { margin:0px; padding:0px; }\n\n\n.tagglyTagging .button { display:none; margin-left:3px; margin-right:3px; }\n.tagglyTagging .button, .tagglyTagging .hidebutton { color:#aaa; font-size:90%; border:0px; padding-left:0.3em;padding-right:0.3em;}\n.tagglyTagging .button:hover, .hidebutton:hover { background:#eee; color:#888; }\n.selected .tagglyTagging .button { display:inline; }\n\n.tagglyTagging .hidebutton { color:white; } /* has to be there so it takes up space. tweak if you're not using a white tiddler bg */\n.selected .tagglyTagging .hidebutton { color:#aaa }\n\n.tagglyLabel { color:#aaa; font-size:90%; }\n\n.tagglyTagging ul {padding-top:0px; padding-bottom:0.5em; margin-left:1em; }\n.tagglyTagging ul ul {list-style-type:disc; margin-left:-1em;}\n.tagglyTagging ul ul li {margin-left:0.5em; }\n\n.editLabel { font-size:90%; padding-top:0.5em; }\n/*}}}*/\n";

refreshStyles("TagglyTaggingStyles");


//}}}

// // <html>&#x25b8;&#x25be;&minus;&plusmn;</html>
Type the text for 'TagglyTagging'
/***
|Name:|TagglyTaggingPlugin|
|Description:|tagglyTagging macro is a replacement for the builtin tagging macro in your ViewTemplate|
|Version:|3.3.1 ($Rev: 6100 $)|
|Date:|$Date: 2008-07-27 01:42:07 +1000 (Sun, 27 Jul 2008) $|
|Source:|http://mptw.tiddlyspot.com/#TagglyTaggingPlugin|
|Author:|Simon Baird <simon.baird@gmail.com>|
|License:|http://mptw.tiddlyspot.com/#TheBSDLicense|
!Notes
See http://mptw.tiddlyspot.com/#TagglyTagging
***/
//{{{

merge(String.prototype,{

	// Parse a boolean tag expression (e.g. "work && !done") into a JavaScript
	// expression string testing `tiddler.tags`, suitable for eval'ing with a
	// `tiddler` variable in scope. A blank expression matches everything.
	parseTagExpr: function(debug) {

		if (this.trim() == "")
			return "(true)";

		var anyLogicOp = /(!|&&|\|\||\(|\))/g;
		var singleLogicOp = /^(!|&&|\|\||\(|\))$/;

		var spaced = this.
			// because square brackets in templates are no good
			// this means you can use [(With Spaces)] instead of [[With Spaces]]
			replace(/\[\(/g," [[").
			replace(/\)\]/g,"]] ").
			// space things out so we can use readBracketedList. tricky eh?
			replace(anyLogicOp," $1 ");

		var expr = "";

		var tokens = spaced.readBracketedList(false); // false means don't uniq the list. nice one JR!

		for (var i=0;i<tokens.length;i++)
			if (tokens[i].match(singleLogicOp))
				expr += tokens[i];
			else
				// bug fix: escape EVERY single quote in the tag name — the regexp
				// previously lacked the /g flag, so only the first quote was
				// escaped and tags with two apostrophes produced broken code.
				// still have round bracket bug i think
				expr += "tiddler.tags.contains('%0')".format([tokens[i].replace(/'/g,"\\'")]);

		if (debug)
			alert(expr);

		return '('+expr+')';
	}

});

merge(TiddlyWiki.prototype,{
	// Return all tiddlers whose tags satisfy the boolean tag expression
	// `tagExpr`, sorted ascending by `sortField` (default "title").
	getTiddlersByTagExpr: function(tagExpr,sortField) {

		var result = [];

		var expr = tagExpr.parseTagExpr();

		// NOTE(review): iterates the global `store`, not `this` — fine for the
		// usual single-store wiki, but verify if this is ever called on another store
		store.forEachTiddler(function(title,tiddler) {
			// expr references the `tiddler` variable bound by this callback
			if (eval(expr))
				result.push(tiddler);
		});

		if(!sortField)
			sortField = "title";

		result.sort(function(a,b) {return a[sortField] < b[sortField] ? -1 : (a[sortField] == b[sortField] ? 0 : +1);});

		return result;
	}
});

config.taggly = {

	// for translations
	lingo: {
		// button label strings; arrows indicate the active sort direction
		labels: {
			asc:        "\u2191", // up arrow (comment was wrong: 0x2191 is UP)
			desc:       "\u2193", // down arrow (comment was wrong: 0x2193 is DOWN)
			title:      "title",
			modified:   "modified",
			created:    "created",
			show:       "+",
			hide:       "-",
			normal:     "normal",
			group:      "group",
			commas:     "commas",
			sitemap:    "sitemap",
			numCols:    "cols\u00b1", // plus minus sign
			label:      "Tagged as '%0':",
			exprLabel:  "Matching tag expression '%0':",
			excerpts:   "excerpts",
			descr:      "descr",
			slices:     "slices",
			contents:   "contents",
			sliders:    "sliders",
			noexcerpts: "title only",
			noneFound:  "(none)"
		},

		tooltips: {
			title:      "Click to sort by title",
			modified:   "Click to sort by modified date",
			created:    "Click to sort by created date",
			show:       "Click to show tagging list",
			hide:       "Click to hide tagging list",
			normal:     "Click to show a normal ungrouped list",
			group:      "Click to show list grouped by tag",
			sitemap:    "Click to show a sitemap style list",
			commas:     "Click to show a comma separated list",
			numCols:    "Click to change number of columns",
			excerpts:   "Click to show excerpts",
			descr:      "Click to show the description slice",
			slices:     "Click to show all slices",
			contents:   "Click to show entire tiddler contents",
			sliders:    "Click to show tiddler contents in sliders",
			noexcerpts: "Click to show entire title only"
		},

		tooDeepMessage: "* //sitemap too deep...//"
	},

	config: {
		showTaggingCounts: true, // append " (N)" tagging counts to listed titles
		listOpts: {
			// the first one will be the default
			sortBy:     ["title","modified","created"],
			sortOrder:  ["asc","desc"],
			hideState:  ["show","hide"],
			listMode:   ["normal","group","sitemap","commas"],
			numCols:    ["1","2","3","4","5","6"],
			excerpts:   ["noexcerpts","excerpts","descr","slices","contents","sliders"]
		},
		valuePrefix: "taggly.", // store.getValue/setValue key prefix for per-tiddler options
		excludeTags: ["excludeLists","excludeTagging"],
		excerptSize: 50, // max characters of an excerpt before truncation
		excerptMarker: "/%"+"%/",
		siteMapDepthLimit: 25 // recursion bound for treeTraverse
	},

	getTagglyOpt: function(title,opt) {
		var val = store.getValue(title,this.config.valuePrefix+opt);
		return val ? val : this.config.listOpts[opt][0];
	},

	setTagglyOpt: function(title,opt,value) {
		if (!store.tiddlerExists(title))
			// create it silently
			store.saveTiddler(title,title,config.views.editor.defaultText.format([title]),config.options.txtUserName,new Date(),"");
		// if value is default then remove it to save space
		return store.setValue(title,
			this.config.valuePrefix+opt,
			value == this.config.listOpts[opt][0] ? null : value);
	},

	getNextValue: function(title,opt) {
		var current = this.getTagglyOpt(title,opt);
		var pos = this.config.listOpts[opt].indexOf(current);
		// a little usability enhancement. actually it doesn't work right for grouped or sitemap
		var limit = (opt == "numCols" ? store.getTiddlersByTagExpr(title).length : this.config.listOpts[opt].length);
		var newPos = (pos + 1) % limit;
		return this.config.listOpts[opt][newPos];
	},

	toggleTagglyOpt: function(title,opt) {
		var newVal = this.getNextValue(title,opt);
		this.setTagglyOpt(title,opt,newVal);
	},

	// Create one option-control button (a sort field, or hideState/listMode/
	// excerpts/numCols) for the taggly list shown in tiddler `title`.
	createListControl: function(place,title,type) {
		var lingo = config.taggly.lingo;
		var label;
		var tooltip;
		var onclick;

		if ((type == "title" || type == "modified" || type == "created")) {
			// "special" controls. a little tricky. derived from sortOrder and sortBy
			label = lingo.labels[type];
			tooltip = lingo.tooltips[type];

			if (this.getTagglyOpt(title,"sortBy") == type) {
				// active sort field: append the direction arrow; a click flips the order
				label += lingo.labels[this.getTagglyOpt(title,"sortOrder")];
				onclick = function() {
					config.taggly.toggleTagglyOpt(title,"sortOrder");
					return false;
				}
			}
			else {
				// inactive field: a click selects it and resets to the default order
				onclick = function() {
					config.taggly.setTagglyOpt(title,"sortBy",type);
					config.taggly.setTagglyOpt(title,"sortOrder",config.taggly.config.listOpts.sortOrder[0]);
					return false;
				}
			}
		}
		else {
			// "regular" controls, nice and simple
			// label/tooltip describe the NEXT value the click will select
			label = lingo.labels[type == "numCols" ? type : this.getNextValue(title,type)];
			tooltip = lingo.tooltips[type == "numCols" ? type : this.getNextValue(title,type)];
			onclick = function() {
				config.taggly.toggleTagglyOpt(title,type);
				return false;
			}
		}

		// hide button because commas don't have columns
		if (!(this.getTagglyOpt(title,"listMode") == "commas" && type == "numCols"))
			createTiddlyButton(place,label,tooltip,onclick,type == "hideState" ? "hidebutton" : "button");
	},

	makeColumns: function(orig,numCols) {
		var listSize = orig.length;
		var colSize = listSize/numCols;
		var remainder = listSize % numCols;

		var upperColsize = colSize;
		var lowerColsize = colSize;

		if (colSize != Math.floor(colSize)) {
			// it's not an exact fit so..
			upperColsize = Math.floor(colSize) + 1;
			lowerColsize = Math.floor(colSize);
		}

		var output = [];
		var c = 0;
		for (var j=0;j<numCols;j++) {
			var singleCol = [];
			var thisSize = j < remainder ? upperColsize : lowerColsize;
			for (var i=0;i<thisSize;i++)
				singleCol.push(orig[c++]);
			output.push(singleCol);
		}

		return output;
	},

	drawTable: function(place,columns,theClass) {
		var newTable = createTiddlyElement(place,"table",null,theClass);
		var newTbody = createTiddlyElement(newTable,"tbody");
		var newTr = createTiddlyElement(newTbody,"tr");
		for (var j=0;j<columns.length;j++) {
			var colOutput = "";
			for (var i=0;i<columns[j].length;i++)
				colOutput += columns[j][i];
			var newTd = createTiddlyElement(newTr,"td",null,"tagglyTagging"); // todo should not need this class
			wikify(colOutput,newTd);
		}
		return newTable;
	},

	createTagglyList: function(place,title,isTagExpr) {
		switch(this.getTagglyOpt(title,"listMode")) {
			case "group":  return this.createTagglyListGrouped(place,title,isTagExpr); break;
			case "normal": return this.createTagglyListNormal(place,title,false,isTagExpr); break;
			case "commas": return this.createTagglyListNormal(place,title,true,isTagExpr); break;
			case "sitemap":return this.createTagglyListSiteMap(place,title,isTagExpr); break;
		}
	},

	getTaggingCount: function(title,isTagExpr) {
		// thanks to Doug Edmunds
		if (this.config.showTaggingCounts) {
			var tagCount = config.taggly.getTiddlers(title,'title',isTagExpr).length;
			if (tagCount > 0)
				return " ("+tagCount+")";
		}
		return "";
	},

	getTiddlers: function(titleOrExpr,sortBy,isTagExpr) {
		return isTagExpr ? store.getTiddlersByTagExpr(titleOrExpr,sortBy) : store.getTaggedTiddlers(titleOrExpr,sortBy);
	},

	getExcerpt: function(inTiddlerTitle,title,indent) {
		if (!indent)
			indent = 1;

		var displayMode = this.getTagglyOpt(inTiddlerTitle,"excerpts");
		var t = store.getTiddler(title);

		if (t && displayMode == "excerpts") {
			var text = t.text.replace(/\n/," ");
			var marker = text.indexOf(this.config.excerptMarker);
			if (marker != -1) {
				return " {{excerpt{<nowiki>" + text.substr(0,marker) + "</nowiki>}}}";
			}
			else if (text.length < this.config.excerptSize) {
				return " {{excerpt{<nowiki>" + t.text + "</nowiki>}}}";
			}
			else {
				return " {{excerpt{<nowiki>" + t.text.substr(0,this.config.excerptSize) + "..." + "</nowiki>}}}";
			}
		}
		else if (t && displayMode == "contents") {
			return "\n{{contents indent"+indent+"{\n" + t.text + "\n}}}";
		}
		else if (t && displayMode == "sliders") {
			return "<slider slide>\n{{contents{\n" + t.text + "\n}}}\n</slider>";
		}
		else if (t && displayMode == "descr") {
			var descr = store.getTiddlerSlice(title,'Description');
			return descr ? " {{excerpt{" + descr  + "}}}" : "";
		}
		else if (t && displayMode == "slices") {
			var result = "";
			var slices = store.calcAllSlices(title);
			for (var s in slices)
				result += "|%0|<nowiki>%1</nowiki>|\n".format([s,slices[s]]);
			return result ? "\n{{excerpt excerptIndent{\n" + result  + "}}}" : "";
		}
		return "";
	},

	notHidden: function(t,inTiddler) {
		if (typeof t == "string")
			t = store.getTiddler(t);
		return (!t || !t.tags.containsAny(this.config.excludeTags) ||
				(inTiddler && this.config.excludeTags.contains(inTiddler)));
	},

	// this is for normal and commas mode
	// Build a flat (bulleted or comma-separated) list of the tiddlers tagged
	// with `title`, split into columns, and render it as a table.
	createTagglyListNormal: function(place,title,useCommas,isTagExpr) {

		var list = config.taggly.getTiddlers(title,this.getTagglyOpt(title,"sortBy"),isTagExpr);

		if (this.getTagglyOpt(title,"sortOrder") == "desc")
			list = list.reverse();

		var output = [];
		var first = true; // suppress the comma before the first entry
		for (var i=0;i<list.length;i++) {
			if (this.notHidden(list[i],title)) {
				var countString = this.getTaggingCount(list[i].title);
				var excerpt = this.getExcerpt(title,list[i].title);
				if (useCommas)
					output.push((first ? "" : ", ") + "[[" + list[i].title + "]]" + countString + excerpt);
				else
					output.push("*[[" + list[i].title + "]]" + countString + excerpt + "\n");

				first = false;
			}
		}

		// commas mode is always a single column
		return this.drawTable(place,
			this.makeColumns(output,useCommas ? 1 : parseInt(this.getTagglyOpt(title,"numCols"))),
			useCommas ? "commas" : "normal");
	},

	// this is for the "grouped" mode
	// Build a two-level list: top-level bullets are the OTHER tags carried by
	// the tagged tiddlers, with the tiddlers nested beneath each one; tiddlers
	// that fall under no visible tag heading are emitted as "leftovers".
	createTagglyListGrouped: function(place,title,isTagExpr) {
		var sortBy = this.getTagglyOpt(title,"sortBy");
		var sortOrder = this.getTagglyOpt(title,"sortOrder");

		var list = config.taggly.getTiddlers(title,sortBy,isTagExpr);

		if (sortOrder == "desc")
			list = list.reverse();

		// start with every title as a leftover; grouped ones get removed below
		var leftOvers = []
		for (var i=0;i<list.length;i++)
			leftOvers.push(list[i].title);

		// map: secondary tag -> concatenated "**[[title]]" child lines
		var allTagsHolder = {};
		for (var i=0;i<list.length;i++) {
			for (var j=0;j<list[i].tags.length;j++) {

				if (list[i].tags[j] != title) { // not this tiddler

					if (this.notHidden(list[i].tags[j],title)) {

						if (!allTagsHolder[list[i].tags[j]])
							allTagsHolder[list[i].tags[j]] = "";

						if (this.notHidden(list[i],title)) {
							allTagsHolder[list[i].tags[j]] += "**[["+list[i].title+"]]"
										+ this.getTaggingCount(list[i].title) + this.getExcerpt(title,list[i].title) + "\n";

							leftOvers.setItem(list[i].title,-1); // remove from leftovers. at the end it will contain the leftovers

						}
					}
				}
			}
		}

		var allTags = [];
		for (var t in allTagsHolder)
			allTags.push(t);

		var sortHelper = function(a,b) {
			if (a == b) return 0;
			if (a < b) return -1;
			return 1;
		};

		// sort headings by the chosen field of each tag's own tiddler;
		// tags without a tiddler sort first
		allTags.sort(function(a,b) {
			var tidA = store.getTiddler(a);
			var tidB = store.getTiddler(b);
			if (sortBy == "title") return sortHelper(a,b);
			else if (!tidA && !tidB) return 0;
			else if (!tidA) return -1;
			else if (!tidB) return +1;
			else return sortHelper(tidA[sortBy],tidB[sortBy]);
		});

		var leftOverOutput = "";
		for (var i=0;i<leftOvers.length;i++)
			if (this.notHidden(leftOvers[i],title))
				leftOverOutput += "*[["+leftOvers[i]+"]]" + this.getTaggingCount(leftOvers[i]) + this.getExcerpt(title,leftOvers[i]) + "\n";

		var output = [];

		if (sortOrder == "desc")
			allTags.reverse();
		else if (leftOverOutput != "")
			// leftovers first...
			output.push(leftOverOutput);

		for (var i=0;i<allTags.length;i++)
			if (allTagsHolder[allTags[i]] != "")
				output.push("*[["+allTags[i]+"]]" + this.getTaggingCount(allTags[i]) + this.getExcerpt(title,allTags[i]) + "\n" + allTagsHolder[allTags[i]]);

		if (sortOrder == "desc" && leftOverOutput != "")
			// leftovers last...
			output.push(leftOverOutput);

		return this.drawTable(place,
				this.makeColumns(output,parseInt(this.getTagglyOpt(title,"numCols"))),
				"grouped");

	},

	// used to build site map
	// Recursively render the tagging hierarchy under `title` as nested
	// wikitext bullets ("*", "**", ...). Depth 0 is the root and contributes
	// no bullet of its own — only its children's output.
	treeTraverse: function(title,depth,sortBy,sortOrder,isTagExpr) {

		var list = config.taggly.getTiddlers(title,sortBy,isTagExpr);

		if (sortOrder == "desc")
			list.reverse();

		// one "*" per nesting level
		var indent = "";
		for (var j=0;j<depth;j++)
			indent += "*"

		var childOutput = "";

		// stop recursing past the configured depth limit (this also bounds
		// recursion when tags form a cycle)
		if (depth > this.config.siteMapDepthLimit)
			childOutput += indent + this.lingo.tooDeepMessage;
		else
			for (var i=0;i<list.length;i++)
				if (list[i].title != title)
					if (this.notHidden(list[i].title,this.config.inTiddler))
						childOutput += this.treeTraverse(list[i].title,depth+1,sortBy,sortOrder,false);

		if (depth == 0)
			return childOutput;
		else
			return indent + "[["+title+"]]" + this.getTaggingCount(title) + this.getExcerpt(this.config.inTiddler,title,depth) + "\n" + childOutput;
	},

	// this is for the site map mode
	// Render the whole tagging tree, then split the resulting wikitext at each
	// top-level bullet so the tree can be laid out in columns.
	createTagglyListSiteMap: function(place,title,isTagExpr) {
		this.config.inTiddler = title; // nasty. should pass it in to traverse probably
		var output = this.treeTraverse(title,0,this.getTagglyOpt(title,"sortBy"),this.getTagglyOpt(title,"sortOrder"),isTagExpr);
		return this.drawTable(place,
				// lookahead split keeps each "*[[...]]" top-level branch intact
				this.makeColumns(output.split(/(?=^\*\[)/m),parseInt(this.getTagglyOpt(title,"numCols"))), // regexp magic
				"sitemap"
				);
	},

	macros: {
		tagglyTagging: {
			// Macro entry point. Accepts a plain tag ("tag"), a tag expression
			// ("expr"), or neither (defaults to the containing tiddler's title).
			handler: function (place,macroName,params,wikifier,paramString,tiddler) {
				var parsedParams = paramString.parseParams("tag",null,true);
				var refreshContainer = createTiddlyElement(place,"div");

				// do some refresh magic to make it keep the list fresh - thanks Saq
				refreshContainer.setAttribute("refresh","macro");
				refreshContainer.setAttribute("macroName",macroName);

				var tag = getParam(parsedParams,"tag");
				var expr = getParam(parsedParams,"expr");

				// stash the display parameters as attributes so refresh() can
				// re-read them on every redraw
				if (expr) {
					refreshContainer.setAttribute("isTagExpr","true");
					refreshContainer.setAttribute("title",expr);
					refreshContainer.setAttribute("showEmpty","true");
				}
				else {
					refreshContainer.setAttribute("isTagExpr","false");
					if (tag) {
        				refreshContainer.setAttribute("title",tag);
						refreshContainer.setAttribute("showEmpty","true");
					}
					else {
        				refreshContainer.setAttribute("title",tiddler.title);
						refreshContainer.setAttribute("showEmpty","false");
					}
				}
				this.refresh(refreshContainer);
			},

			// Rebuild the control buttons and list inside `place`, driven by the
			// attributes stored by handler() above.
			refresh: function(place) {
				var title = place.getAttribute("title");
				var isTagExpr = place.getAttribute("isTagExpr") == "true";
				var showEmpty = place.getAttribute("showEmpty") == "true";
				removeChildren(place);
				addClass(place,"tagglyTagging");
				var countFound = config.taggly.getTiddlers(title,'title',isTagExpr).length
				if (countFound > 0 || showEmpty) {
					var lingo = config.taggly.lingo;
					config.taggly.createListControl(place,title,"hideState");
					if (config.taggly.getTagglyOpt(title,"hideState") == "show") {
						createTiddlyElement(place,"span",null,"tagglyLabel",
								isTagExpr ? lingo.labels.exprLabel.format([title]) : lingo.labels.label.format([title]));
						config.taggly.createListControl(place,title,"title");
						config.taggly.createListControl(place,title,"modified");
						config.taggly.createListControl(place,title,"created");
						config.taggly.createListControl(place,title,"listMode");
						config.taggly.createListControl(place,title,"excerpts");
						config.taggly.createListControl(place,title,"numCols");
						config.taggly.createTagglyList(place,title,isTagExpr);
						if (countFound == 0 && showEmpty)
							createTiddlyElement(place,"div",null,"tagglyNoneFound",lingo.labels.noneFound);
					}
				}
			}
		}
	},

	// todo fix these up a bit
	styles: [
"/*{{{*/",
"/* created by TagglyTaggingPlugin */",
".tagglyTagging { padding-top:0.5em; }",
".tagglyTagging li.listTitle { display:none; }",
".tagglyTagging ul {",
"	margin-top:0px; padding-top:0.5em; padding-left:2em;",
"	margin-bottom:0px; padding-bottom:0px;",
"}",
".tagglyTagging { vertical-align: top; margin:0px; padding:0px; }",
".tagglyTagging table { margin:0px; padding:0px; }",
".tagglyTagging .button { visibility:hidden; margin-left:3px; margin-right:3px; }",
".tagglyTagging .button, .tagglyTagging .hidebutton {",
"	color:[[ColorPalette::TertiaryLight]]; font-size:90%;",
"	border:0px; padding-left:0.3em;padding-right:0.3em;",
"}",
".tagglyTagging .button:hover, .hidebutton:hover, ",
".tagglyTagging .button:active, .hidebutton:active  {",
"	border:0px; background:[[ColorPalette::TertiaryPale]]; color:[[ColorPalette::TertiaryDark]];",
"}",
".selected .tagglyTagging .button { visibility:visible; }",
".tagglyTagging .hidebutton { color:[[ColorPalette::Background]]; }",
".selected .tagglyTagging .hidebutton { color:[[ColorPalette::TertiaryLight]] }",
".tagglyLabel { color:[[ColorPalette::TertiaryMid]]; font-size:90%; }",
".tagglyTagging ul {padding-top:0px; padding-bottom:0.5em; margin-left:1em; }",
".tagglyTagging ul ul {list-style-type:disc; margin-left:-1em;}",
".tagglyTagging ul ul li {margin-left:0.5em; }",
".editLabel { font-size:90%; padding-top:0.5em; }",
".tagglyTagging .commas { padding-left:1.8em; }",
"/* not technically tagglytagging but will put them here anyway */",
".tagglyTagged li.listTitle { display:none; }",
".tagglyTagged li { display: inline; font-size:90%; }",
".tagglyTagged ul { margin:0px; padding:0px; }",
".excerpt { color:[[ColorPalette::TertiaryDark]]; }",
".excerptIndent { margin-left:4em; }",
"div.tagglyTagging table,",
"div.tagglyTagging table tr,",
"td.tagglyTagging",
" {border-style:none!important; }",
".tagglyTagging .contents { border-bottom:2px solid [[ColorPalette::TertiaryPale]]; padding:0 1em 1em 0.5em;",
"  margin-bottom:0.5em; }",
".tagglyTagging .indent1  { margin-left:3em;  }",
".tagglyTagging .indent2  { margin-left:4em;  }",
".tagglyTagging .indent3  { margin-left:5em;  }",
".tagglyTagging .indent4  { margin-left:6em;  }",
".tagglyTagging .indent5  { margin-left:7em;  }",
".tagglyTagging .indent6  { margin-left:8em;  }",
".tagglyTagging .indent7  { margin-left:9em;  }",
".tagglyTagging .indent8  { margin-left:10em; }",
".tagglyTagging .indent9  { margin-left:11em; }",
".tagglyTagging .indent10 { margin-left:12em; }",
".tagglyNoneFound { margin-left:2em; color:[[ColorPalette::TertiaryMid]]; font-size:90%; font-style:italic; }",
"/*}}}*/",
		""].join("\n"),

	// Install the plugin: register the macros, publish the shadow stylesheet,
	// and re-render styles whenever the stylesheet tiddler changes.
	init: function() {
		merge(config.macros,this.macros);
		config.shadowTiddlers["TagglyTaggingStyles"] = this.styles;
		store.addNotification("TagglyTaggingStyles",refreshStyles);
	}
};

config.taggly.init();

//}}}

/***
InlineSlidersPlugin
By Saq Imtiaz
http://tw.lewcid.org/sandbox/#InlineSlidersPlugin

// syntax adjusted to not clash with NestedSlidersPlugin
// added + syntax to start open instead of closed

***/
//{{{
// Inline slider formatter: <slider label>...</slider> renders a button that
// toggles visibility of the wikified panel beneath it; <slider+ starts open.
config.formatters.unshift( {
	name: "inlinesliders",
	// match: "\\+\\+\\+\\+|\\<slider",
	match: "\\<slider",
	// lookaheadRegExp: /(?:\+\+\+\+|<slider) (.*?)(?:>?)\n((?:.|\n)*?)\n(?:====|<\/slider>)/mg,
	lookaheadRegExp: /(?:<slider)(\+?) (.*?)(?:>)\n((?:.|\n)*?)\n(?:<\/slider>)/mg,
	handler: function(w) {
		this.lookaheadRegExp.lastIndex = w.matchStart;
		var lookaheadMatch = this.lookaheadRegExp.exec(w.source)
		if(lookaheadMatch && lookaheadMatch.index == w.matchStart ) {
			// button label is the slider title plus a right-pointing chevron (U+00BB)
			var btn = createTiddlyButton(w.output,lookaheadMatch[2] + " "+"\u00BB",lookaheadMatch[2],this.onClickSlider,"button sliderButton");
			var panel = createTiddlyElement(w.output,"div",null,"sliderPanel");
			// a '+' right after <slider means the panel starts open
			panel.style.display = (lookaheadMatch[1] == '+' ? "block" : "none");
			wikify(lookaheadMatch[3],panel);
			w.nextMatch = lookaheadMatch.index + lookaheadMatch[0].length;
		}
   },
   onClickSlider : function(e) {
		if(!e) var e = window.event;
		// toggle the panel element that immediately follows the button
		var n = this.nextSibling;
		n.style.display = (n.style.display=="none") ? "block" : "none";
		return false;
	}
});

//}}}

/*{{{*/
/* created by TagglyTaggingPlugin */
/* Container for the tag-based navigation list rendered inside a tiddler */
.tagglyTagging { padding-top:0.5em; }
.tagglyTagging li.listTitle { display:none; }
.tagglyTagging ul {
	margin-top:0px; padding-top:0.5em; padding-left:2em;
	margin-bottom:0px; padding-bottom:0px;
}
/* Reset spacing on the wrapper and its internal layout table */
.tagglyTagging { vertical-align: top; margin:0px; padding:0px; }
.tagglyTagging table { margin:0px; padding:0px; }
/* Control buttons stay invisible until the tiddler is selected (see
   .selected rule below, which flips visibility back on) */
.tagglyTagging .button { visibility:hidden; margin-left:3px; margin-right:3px; }
.tagglyTagging .button, .tagglyTagging .hidebutton {
	color:[[ColorPalette::TertiaryLight]]; font-size:90%;
	border:0px; padding-left:0.3em;padding-right:0.3em;
}
.tagglyTagging .button:hover, .hidebutton:hover,
.tagglyTagging .button:active, .hidebutton:active  {
	border:0px; background:[[ColorPalette::TertiaryPale]]; color:[[ColorPalette::TertiaryDark]];
}
.selected .tagglyTagging .button { visibility:visible; }
/* The hide button blends into the page background until selected */
.tagglyTagging .hidebutton { color:[[ColorPalette::Background]]; }
.selected .tagglyTagging .hidebutton { color:[[ColorPalette::TertiaryLight]] }
.tagglyLabel { color:[[ColorPalette::TertiaryMid]]; font-size:90%; }
.tagglyTagging ul {padding-top:0px; padding-bottom:0.5em; margin-left:1em; }
.tagglyTagging ul ul {list-style-type:disc; margin-left:-1em;}
.tagglyTagging ul ul li {margin-left:0.5em; }
.editLabel { font-size:90%; padding-top:0.5em; }
.tagglyTagging .commas { padding-left:1.8em; }
/* not technically tagglytagging but will put them here anyway */
.tagglyTagged li.listTitle { display:none; }
.tagglyTagged li { display: inline; font-size:90%; }
.tagglyTagged ul { margin:0px; padding:0px; }
.excerpt { color:[[ColorPalette::TertiaryDark]]; }
.excerptIndent { margin-left:4em; }
/* Strip all borders from the table used to lay out the tagging view */
div.tagglyTagging table,
div.tagglyTagging table tr,
td.tagglyTagging
 {border-style:none!important; }
.tagglyTagging .contents { border-bottom:2px solid [[ColorPalette::TertiaryPale]]; padding:0 1em 1em 0.5em;
  margin-bottom:0.5em; }
/* Indentation steps for nested tag levels: 1em per level starting at 3em */
.tagglyTagging .indent1  { margin-left:3em;  }
.tagglyTagging .indent2  { margin-left:4em;  }
.tagglyTagging .indent3  { margin-left:5em;  }
.tagglyTagging .indent4  { margin-left:6em;  }
.tagglyTagging .indent5  { margin-left:7em;  }
.tagglyTagging .indent6  { margin-left:8em;  }
.tagglyTagging .indent7  { margin-left:9em;  }
.tagglyTagging .indent8  { margin-left:10em; }
.tagglyTagging .indent9  { margin-left:11em; }
.tagglyTagging .indent10 { margin-left:12em; }
.tagglyNoneFound { margin-left:2em; color:[[ColorPalette::TertiaryMid]]; font-size:90%; font-style:italic; }
/*}}}*/
! The vi editor

!! vi Intro

!!! Background: 
* ex, vi, vim
** {{Command{ex}}} = line oriented text editor (for printed output / slow displays / modems)
*** demonstrate ''c'' (change) and ''i'' (insert) commands.  Go to a line number, use command with ''.'' to return to prompt.
** {{Command{vi}}} = screen oriented instead of line oriented
*** Different modes - either entering text or executing commands
*** Commands are either {{Command{vi}}} commands or {{Command{ex}}} commands.
** {{Command{ex}}} & {{Command{vi}}} are different interfaces to the same program
** {{Command{ex}}} & {{Command{vi}}} began with original unix versions, over 30 years ago
** {{Command{vi}}} is now the standard unix text editor
** {{Command{vim}}} = vi Improved - extra commands and functionality

!!! Using vi:
* Opening a document for editing loads it into a buffer, which is the in-memory text of a file.  
** Any changes are made to the buffer and not saved to the file until the //write// command is provided.
* There are two Modes:
** Command mode - where you provide commands to the editor
*** These may be either {{Command{vi}}} or {{Command{ex}}} commands
** Input mode - where you can interact with the content of the file
*** You'll typically see the string ''-- INSERT --'' in the bottom-left corner when you're in Input Mode
*** Leave input mode by pressing ESC
* vi commands (command mode) contain an operator (what to do) and scope (what to do it on)
** Examples:
*** {{Monospaced{''d$''}}} - delete (d) all text from the cursor to the end of the line ($ typically means end of line)
*** {{Monospaced{''dw''}}} - delete (d) the current word
*** {{Monospaced{''d5w''}}} - delete (d) the current and next 4 (5) words (w)
*** {{Monospaced{''d2d''}}} - delete (d) the current line and the next one (2 lines in total)
*** {{Monospaced{''cw''}}} - change (c) the next word (w), placing you in input mode
*** {{Monospaced{''ct:''}}} - change (c) all characters until (t) the next colon (:)
* Searching with ''/'' and ''?''
** Search down with the ''/'' key
** Search up with the ''?'' key
*** After you type either ''/'' or ''?'', your cursor will move to the bottom-left corner and you will be prompted to enter a search string.  Press enter to begin the search.
** Repeat your last search with ''n''


!! Using ex commands in vi

The {{Command{vi}}} editor is the ''vi''sual screen-oriented front-end for the {{Command{ex}}} line-oriented text editor.  {{Command{ex}}} was one of the original Unix text editors from the days when text files could only be displayed and edited one line at a time.  It wasn't yet possible to display a full screen of text.  The ''vi''sual functionality was added after technology evolved to support full-screen document editing.  {{Command{vi}}} also supports the original {{Command{ex}}} commands for manipulating a document.  These commands bring a great deal of power to the editor and make solving complex tasks rather simple.

* Press the : (colon) key to enter {{Command{ex}}} command mode when you are not in Input mode.  Your cursor will move to the bottom left corner.
* {{Command{ex}}} commands will be displayed on the bottom status line.  Press ~CTRL-C to cancel the command and return to vi mode.
* Syntax: {{Monospaced{'' :[address]command ''}}}
** {{Monospaced{'' :[address] ''}}} is an optional component which allows you to specify which lines to act upon.  

!!! Valid address formats
* Addresses may be addressed singly:
** {{Monospaced{''.''}}} - represents current line (default if no address is specified)
** {{Monospaced{''//n//''}}} - a specific line number
** {{Monospaced{''$''}}} - last line in the file
* or as a range:
**{{Monospaced{''%''}}} - Whole file
** {{Monospaced{''address1,address2''}}} - from address1 to address2.
** Also includes +//n// and -//n// to include the next or previous //n// lines
* Examples:
** {{Monospaced{'':12,20d''}}} - delete lines 12 to 20
** {{Monospaced{'':.,+5''}}}  - current and next five lines
** {{Monospaced{'':10,$''}}} - lines 10 through the end of the file
** {{Monospaced{'':$-2,$''}}} - last three lines (last line and two previous)

!!! Most useful ex commands

* ''d'' - delete lines
** {{Monospaced{'':10d''}}} - delete line 10
** {{Monospaced{'' :1,10d ''}}} - delete lines 1 to 10
* ''e'' - edit
** {{Monospaced{'':e! ''}}} - reopen current file, discarding changes
* ''s'' - substitute
**{{Monospaced{'' :s/one/two/ ''}}} - change first instances of one to two on the current line
**{{Monospaced{'' :%s/one/two/ ''}}} - change first instance of one to two on all lines in the document
**{{Monospaced{'' :%s/one/two/g ''}}} - change all instances of one to two on all lines in the document
**{{Monospaced{'' :.,+5s/one/two/g ''}}} - change all instances of one to two on current and next 5 lines.
* ''g'' - globally execute specified commands on lines containing a particular pattern
** {{Monospaced{'' :g/stuff/d ''}}} - delete all lines containing the string stuff
** {{Monospaced{'' :g/lpd-errs/s/^/#/ ''}}}  - add a comment to the beginning of the line on all lines containing the string lpd-errors
** {{Monospaced{'' :10,20g/stuff/d ''}}} - remove lines between lines 10 and 20 that contain the string stuff

----


!! More info
*vi handouts: [[vi Diagram|handouts/viDiagram.pdf]] & [[Old Handout|handouts/viHandout.pdf]]
*{{Command{vimtutor}}} command
*http://www.gentoo.org/doc/en/vi-guide.xml
*[[UNIX Command summary|handouts/UnixCommandSummary.pdf]] back page

http://docstore.mik.ua/orelly/unix/unixnut/ch09_01.htm


! Using the compilers

Also a simple exercise to get more practice editing text files with vi

{{Command{gcc}}} & {{Command{g++}}}
Use {{Command{gcc}}} for compiling C code and {{Command{g++}}} for compiling C++ code.  Source code file extensions must either be .c or .cpp

{{Command{gcc -o //name_of_executable// source.c}}}
{{Command{g++ -o //name_of_executable// source.cpp}}}

//name_of_executable// = executable file to create after compiling your source code, instead of using the default a.out

{{{
#include <stdio.h>

main()
{
    printf("Hello World in C\n\n");
}
}}}

{{{
#include <iostream>
using namespace std;
int main()
{ 
  cout << "Hello World!" << endl;
  return 0;
}
}}}


! Assignments

!! Read :
 - Chapter 12 in [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]]

!! Complete: 
 - [[Lab 27|labs/lab27.pdf]] & [[Lab 28|labs/lab28.pdf]] 
 - These labs are optional for additional vi practice and will be accepted for extra credit.



/***

|Name|ToggleSideBarMacro|
|Created by|SaqImtiaz|
|Location|http://lewcid.googlepages.com/lewcid.html#ToggleSideBarMacro|
|Version|1.0|
|Requires|~TW2.x|
!Description:
Provides a button for toggling visibility of the SideBar. You can choose whether the SideBar should initially be hidden or displayed.

!Demo
<<toggleSideBar "Toggle Sidebar">>

!Usage:
{{{<<toggleSideBar>>}}} <<toggleSideBar>>
additional options:
{{{<<toggleSideBar label tooltip show/hide>>}}} where:
label = custom label for the button,
tooltip = custom tooltip for the button,
show/hide = use one or the other, determines whether the sidebar is shown at first or not.
(default is to show the sidebar)

You can add it to your tiddler toolbar, your MainMenu, or where you like really.
If you are using a horizontal MainMenu and want the button to be right aligned, put the following in your StyleSheet:
{{{ .HideSideBarButton {float:right;} }}}

!History
*23-07-06: version 1.0: completely rewritten, now works with custom stylesheets too, and easier to customize start behaviour.
*20-07-06: version 0.11
*27-04-06: version 0.1: working.

!Code
***/
//{{{
// Namespace object for the ToggleSideBar macro.
config.macros.toggleSideBar = {};

// Tunable settings: the stylesheet text applied in each state, and the
// arrow glyphs shown on the button (arrow1 = sidebar hidden, arrow2 = shown).
config.macros.toggleSideBar.settings = {
	styleHide: "#sidebar { display: none;}\n"+"#contentWrapper #displayArea { margin-right: 1em;}\n"+"",
	styleShow: " ",
	arrow1: "«",
	arrow2: "»"
};

// Macro handler: renders the toggle button and optionally hides the
// sidebar at startup.
//   params[0] = custom button label ('.' or absent for arrow only)
//   params[1] = custom tooltip
//   params[2] = "hide" to start with the sidebar hidden (default "show")
config.macros.toggleSideBar.handler = function (place,macroName,params,wikifier,paramString,tiddler)
{
	var tip = params[1] || 'toggle sidebar';
	var startMode = (params[2] && params[2] == "hide") ? "hide" : "show";
	// Hidden state shows arrow1 («); shown state shows arrow2 (»).
	var glyph = (startMode == "hide") ? this.settings.arrow1 : this.settings.arrow2;
	var caption = (params[0] && params[0] != '.') ? params[0] + " " + glyph : glyph;
	createTiddlyButton(place,caption,tip,this.onToggleSideBar,"button HideSideBarButton");
	if(startMode == "hide") {
		// Tag the sidebar element so the click handler knows its state,
		// then apply the hiding stylesheet immediately.
		(document.getElementById("sidebar")).setAttribute("toggle","hide");
		setStylesheet(this.settings.styleHide,"ToggleSideBarStyles");
	}
};

// Click handler: flips the sidebar between hidden and shown by swapping
// the stylesheet, updating the "toggle" state attribute, and replacing
// the arrow glyph in the button label.
config.macros.toggleSideBar.onToggleSideBar = function(){
	var bar = document.getElementById("sidebar");
	var cfg = config.macros.toggleSideBar.settings;
	if(bar.getAttribute("toggle") == 'hide') {
		// Currently hidden: reveal the sidebar and swap « for ».
		setStylesheet(cfg.styleShow,"ToggleSideBarStyles");
		bar.setAttribute("toggle","show");
		this.firstChild.data = this.firstChild.data.replace(cfg.arrow1,cfg.arrow2);
	} else {
		// Currently shown: hide the sidebar and swap » for «.
		setStylesheet(cfg.styleHide,"ToggleSideBarStyles");
		bar.setAttribute("toggle","hide");
		this.firstChild.data = this.firstChild.data.replace(cfg.arrow2,cfg.arrow1);
	}

	return false;
}

// One-off stylesheet: bold the toggle button and give it horizontal padding.
setStylesheet(".HideSideBarButton .button {font-weight:bold; padding: 0 5px;}\n","ToggleSideBarButtonStyles");

//}}}
|~ViewToolbar|closeTiddler closeOthers editTiddler > fields syncing permalink references jump|
|~EditToolbar|+saveTiddler -cancelTiddler deleteTiddler|

{{Note{This video is a nice demo and overview on how this all works.  It may be helpful to review it before proceeding.  https://www.youtube.com/watch?v=XFJ6_BYno08}}}

!! Defeating firewalls with SSH to access protected resources

Knowing how to more fully use SSH and its tunneling and proxy capabilities to defeat firewalls or access private IP addresses over the internet is an excellent skill for a security practitioner to have!  There are two methods we can use with SSH to defeat firewalls and access these resources:

A.  Dynamic application-level port forwarding (SOCKS proxy)
<<<
Specifies local "dynamic" application-level port forwarding.  This works by allocating a socket to listen to a port on the local side, optionally bound to the specified bind_address.  Whenever a connection is made to this port, the connection is forwarded over the secure channel, and the application protocol is then used to determine where to connect to from the remote machine.
<<<

B.  Port forwarding
<<<
Specifies that connections to the given TCP port or Unix socket on the local (client) host are to be forwarded to the given host and port, or Unix socket, on the remote side.  This works by allocating a socket to listen to either a TCP port on the local side, optionally bound to the specified bind_address, or to a Unix socket.  Whenever a connection is made to the local port or socket, the connection is forwarded over the secure channel, and a connection is made to either host port hostport, or the Unix socket remote_socket, from the remote machine.
<<<

Method A. functions as a traditional application-level proxy.  You would configure your application (eg: web browser) to proxy all connections through the tunnel.  Method B. creates a 1:1 connection:  a TCP port on your local PC is tunneled through the SSH connection to a specific IP address and TCP port on the other side.  This method is best when there is no option to configure a proxy in your application. 

We're going to use method ''A'' for accessing internal web resources behind our class router.  This grants us the most flexibility since our browser allows us to configure an application-level proxy.


Before you begin, open your web browser and load the page http://ifconfig.me.  Take note of the IP address displayed.  We will compare this to the IP address you receive after everything is set up.  


!!! A. Establishing a SOCKS proxy with SSH

A proxy is a middle man, passing on network requests to their destination on your behalf.

A SOCKS proxy (socket secure) is a protocol to route packets between a client and a server through an intermediate proxy.  This is used (typically for web traffic) when the client is not able to communicate with the server directly, but the client can communicate with the proxy system and the chosen proxy can communicate with the server.  Some sites set up a proxy for web traffic as a means to enforce policy, monitor traffic, and block direct connections to web sites.

Here, your home PC cannot access your web server VM or the Naemon monitoring server but the class shell server can.  We'll use the class shell server to proxy your browser's web connections and be the middleman for your web requests.  This diagram illustrates the overall goal.  We see your proxy connection traveling through the encrypted SSH tunnel to the class shell server.  Web requests are then made from the perspective of the class shell server.

[img[img/proxy.png]]


SSH can be used to establish a SOCKS proxy.  This functionality is available from putty or the command line ~OpenSSH

''1.'' To set up the Proxy on your home PC, complete either ''a)'' or ''b)'', depending on your OS:

''a)'' If your home OS is Mac or Unix:  This command will create an encrypted proxy tunnel between your PC and the specified host, in this case our class shell server.  Traffic connecting to your PC on port 8118 will then pass through this proxy.  Execute a similar command on your home computer.  You may also need to update the username.
<<<
Set up SOCKS proxy:  {{Command{ssh -p 2205 -D 8118 lab.ncs205.net}}}
<<<

''b)'' Follow these steps when connecting with Putty from your home Windows PC:
<<<
* Expand the Connection / SSH menu
* Select Tunnels
* Enter ''8118'' in the Source port box
* Select ''Dynamic''
* Click Add
* Connect to a remote host (the class shell server) as normal
<<<
* [[This video|Putty Proxy]] demonstrates configuring Putty to add the dynamic tunnel.


''2.'' Your browser must be configured to pass traffic through the encrypted proxy.

I use the ~FoxyProxy extension to easily toggle between proxy settings in my browser.  It can also be configured to automatically send only selected sites through the proxy.  
* Firefox:
** [[Firefox Extension|https://addons.mozilla.org/en-US/firefox/addon/foxyproxy-standard/]]
** ~NCS205 settings file for use in Firefox: [[FoxyProxy-ncs205.json|https://www.ncs205.net/media/FoxyProxy-ncs205.json]]
* Chrome:
** [[Chrome Extension|https://chrome.google.com/webstore/detail/foxyproxy-standard/gcknhkkoolaabfmlnjonogaaifnjlfnp]]
** I don't have a version of the config for Chrome; you're on your own for now and will need to configure it manually.
** Be sure to check the //Proxy DNS// option


Install the browser extension, import the settings file, and enable the proxy.

* [[This video|Firefox Proxy]] demonstrates using Firefox with the proxy to access an internal website


!!! B. Verification

Verification should be built into everything you configure.  Now that your proxy is established, let's verify it is functioning correctly and web connections from Firefox are flowing through the class infrastructure.  Load the page http://ifconfig.me again in your browser and observe the IP address.  It should have changed from the original value you observed and instead contain the public IP address of the class shell server.  Next, run the command {{Command{curl ifconfig.me}}} on the class shell server.  The IP address in your browser and displayed on the command line should match. This will confirm your traffic is now properly going through the proxy.

With the class server acting as a middle man, you can now load internal resources in this web browser which would have otherwise been blocked from the outside world.


!!! C. Naemon infrastructure monitoring

[[Naemon|https://www.naemon.org/]] is a tool which continuously monitors resources to provide a high level view of the health of an environment.  I'm running a Naemon server to monitor your ~VMs and use it to assist with grading your labs.  You can also use it to monitor the state of your systems and correct any issues it discovers.

Naemon is running on the internal class network and is not directly accessible from the outside world.  You will need to bypass the router and use the class shell server as a proxy in order to reach it.  

Once the proxy is configured in your browser, navigate to the URL &nbsp; '' http://head.ncs205.net/ ''.  The username is ''ncs205'' and password is ''naemon''.

This video contains a brief [[Naemon Introduction]].


{{Note{Naemon status checks run every two hours.  If you fix a problem, you will either need to wait up to two hours for the recheck or force Naemon to recheck.}}}

{{Warning{Warning: Naemon checks are not a replacement for your own sound testing and verification.  They may return false positives and negatives.  Not every possibility can be evaluated.  They are only a troubleshooting and status aid; not a definitive determination that something is correct.  I will still perform manual testing for most of your labs that Naemon cannot fully evaluate.}}}
!! Asking for help
* Title your posts appropriately.  Use something descriptive in the name and not just the lab and question number.  A subject like @@Lab 17, #2 - Incorrectly discarding data@@ is far more helpful than something generic like ''Lab 17''.
* When asking for help in blackboard, be sure to include relevant supporting information.  You'll receive faster responses if you provide everything someone needs to help you.
** If you're asking about a lab question, include that question in your post so everyone doesn't need to first look at the lab.
** Did you receive an error from a command?  Be sure to include the error and the command you ran.  The shell prompt will also include helpful information:
*** The host you're running the command on
*** The user you're running the command as
*** A portion of the current working directory.  Including the full output of the {{Command{pwd}}} command would be helpful too
*** The exact command string you're running.
** Don't forget to include any relevant log information and troubleshooting steps you've already taken.  You're more likely to get help if you start the process and can describe what you've already done to troubleshoot.
* Be sure to review everything for typos first.  Too many posts to Blackboard asking for help will be for problems caused by typos.  Save some time and check your typing first.
* Screenshots are helpful too.  Pictures are worth a thousand words.

!! Posting Screenshots
When posting screenshots, use the Insert Local Files button (circled) in Blackboard.  Don't attach a file.  It's much easier to work with embedded images than ones that need to be opened in a new tab.

@@display:block;text-align:center;[img[img/screenshots.png]]@@


!! Pasting in terminal output
Everyone should be using the Blackboard discussion boards during the course of the semester and will likely need to paste in output from the command line at some point. 

Aesthetics and readability should be considered in everything you produce.  We can make our post easier to read with a couple additional steps.

''1.'' Paste your copied text from the terminal where you would like it to appear.  Finish typing out your message.  Before sending, change the formatting for the portions you pasted from the terminal.

''2.'' Select the text you pasted in and change the paragraph type to Formatted Code.  This will remove the double spacing.

''3.'' Select the text you pasted in and choose the font ''Courier New''.  All commands and text copied from the terminal should be written with a monospaced font like Courier New to make spacing uniform between the characters and show that what you're typing is a command or output from one.

''4.''  Select the command you executed to get the output and change it to bold.  This makes it easier to identify the command that was used from the output returned.  Including the shell prompt and executed command provides important context.

''5.'' If appropriate, use the highlighter to draw attention to any parts you're talking about.  Be sure to first change the color to a brighter one.

@@display:block;text-align:center;[img[img/blackboard4.png]]@@

You'll finally be left with something that is much easier to read.  You're more likely to get a response to your forum post if you provide all necessary information in a way that's easy to work with.  Pasting text like this is preferable to just posting a screenshot.  If you paste in the text, someone can quote it in a reply and easily highlight relevant parts.

@@display:block;text-align:center;[img[img/blackboard3.png]]@@
/%


----

 avoid Blackboard's text mangling and

 If you paste copied text from the terminal, blackboard will turn it into a mangled mess:

than the Blackboard mangled mess


@@display:block;text-align:center;[img[img/blackboard0.png]]@@


''1.'' Insert a few blank lines where you want to put the pasted text.  These blank lines will make it easier to add additional text after inserting your pasted text from the terminal

''2.'' Choose the HTML editor from the Toolbar

@@display:block;text-align:center;[img[img/blackboard1.png]]@@


''4.'' Add a {{Command{&lt;pre>}}} HTML tag before your pasted text and a {{Command{&lt;/pre>}}} tag after it.  This will prevent the mangled formatting and preserve all spacing, just as you see it in the terminal.

@@display:block;text-align:center;[img[img/blackboard2.png]]@@

''5.'' Click update.  You should now see your copied text nicely formatted in Blackboard.

''6.'' Select the text you pasted in and choose the font ''Courier New''.  All commands and text copied from the terminal should be written with a monospaced font like Courier New to make spacing uniform and highlight what you're typing is a command or output from one.

''7.''  Highlight the command you executed to get the output and change it to bold.  This makes it easier to identify the command that was used from the output returned.

''8.'' If appropriate, use the highlighter to draw attention to any parts you're talking about.  Be sure to first change the color to a brighter one.

@@display:block;text-align:center;[img[img/blackboard4.png]]@@


You'll finally be left with something that is much easier to read than the Blackboard mangled mess.  You're more likely to get a response to your forum post if it is easier to read.  Pasting text like this is preferable to just posting a screenshot.  If you paste in the text, someone can quote it in a reply and easily highlight relevant parts.

@@display:block;text-align:center;[img[img/blackboard3.png]]@@  %/

!! Using Discord
We'll need to keep Discord organized in order to keep it useful.  Get in the habit of this now, because you'll have these same issues later in the workplace.  The concepts are very similar to what we need to do on Slack in the corporate world.

!!! There are four types of channels:
# //administrative// - Administrative questions about the class like grading, due dates, and technical support issues.  Not for course content.
# //misc-chatter// - Conversation not related to this class
# //resources// - Posts about general course notes and resources that might be helpful for others
# //week#// - The weekly course content discussions.  Post to this channel regarding material that was ''//assigned//'' in this week.
** For example, if you have a question about a week 1 lab, post it to the week 1 channel even if we're now in week 2.

!! Asking for help
* Use threads for your questions to help keep things organized.
** See below for an example on using threads
** Title your threads appropriately.  Use something descriptive in the name and not just the lab and question number.  A subject like @@Lab 17, #2 - Incorrectly discarding data@@ is far more helpful than something generic like ''Lab 17''.
** Organization and usability are important in everything you do.  Full credit for asking a question and providing help will only be given for conversations which are within threads.
* When asking for help, be sure to include relevant supporting information.  You'll receive faster responses if you provide everything someone needs to help you.
** If you're asking about a lab question, including that question in your post is helpful so everyone doesn't need to first look at the lab to know what you're talking about.
** Send us what you're seeing, don't just describe it.  A picture is worth a thousand words
*** Did you receive an error from a command?  Be sure to include the error and the command you ran.  
*** The shell prompt will also include helpful information, such as:
**** The host you're running the command on
**** The user you're running the command as
**** A portion of the current working directory.  Including the full output of the {{Command{pwd}}} command might be helpful too
**** The exact command string you're running.
** Don't forget to include any relevant log information, configuration lines, and troubleshooting steps you've already taken.  You're more likely to get help if you start the process and can describe what you've already done to troubleshoot.
* Be sure to review everything for typos first.  Too many posts asking for help will be for problems caused by typos.  Save some time and check your typing first.
* If you solve your problem while you're waiting for help, be sure to post an update.  Don't let someone else waste their time helping you when you no longer need it.

!! Using code blocks

* Be sure all code, commands, and output are enclosed within a code block.  This will make it easier to identify commands and prevent Discord from interpreting special characters.
* Single commands can be put inside of a code block by enclosing your command in backticks.
* A series of lines can be put inside of a code block by putting three backticks at the start of the first line and three backticks at the end of the last line.
* When possible, sending text in code blocks is better than just sending a screenshot.  Text sent in a screenshot cannot be copy/pasted for any testing.
* A full list of Markdown formatting options is available in the [[Discord help docs|https://support.discord.com/hc/en-us/articles/210298617-Markdown-Text-101-Chat-Formatting-Bold-Italic-Underline-]]

Example of using single line code block:
[img[img/discord-code3.png]]

Example of using multi-line code block:
[img[img/discord-code1.png]]

Results of using code blocks:
[img[img/discord-code2.png]]


!! Using threads

Threads in Discord will help keep the weekly channels and conversations organized.  Create a new thread for each question you're asking.

----
[img[img/discord1.png]]
# Click on the week number for the material you would like to discuss
# Click on the threads icon up top
----

[img[img/discord2.png]]
# Enter your thread name
# Enter your question in the Starter Message followed by any supporting information in additional posts.
# Click on //Create Thread//
----

[img[img/discord3.png]]
# To join a thread, click on the //# Message// link.  Your thread will open to the right
# Post any additional messages within the thread to the right. 
----

[img[img/discord4.png]]
The available threads will appear under the weekly channel.  You can click on the thread title to easily join the conversation.  

Also notice the excellent use of replies here.  Sending a message as a reply will notify your recipient they have a new message.
----

[img[img/discord5.png]]
If you would like to follow an interesting thread, right click on the thread message area and choose //Join Thread//.
----

[img[img/discord6.png]]
After joining a thread, it will appear on the left side of your screen under the channel for the week number.  This will make it easier to find later.


<!--{{{-->
<div class='toolbar' macro='toolbar [[ToolbarCommands::ViewToolbar]]'></div>
<div class='title' macro='view title'><span class="miniTag" macro="miniTag"></span></div>
<div class='subtitle'>Updated <span macro='view modified date [[MMM DD, YYYY]]'></span> (<span macro='message views.wikified.createdPrompt'></span> <span macro='view created date [[MMM DD, YYYY]]'></span>)<BR><BR></div>
<div class='viewer' macro='view text wikified'></div>
<div class="tagglyTagging" macro="tagglyTagging"><BR><BR></div>
<div class="tagglyTagged" macro="hideSomeTags"></div>
<div class='tagClear'></div>
<!--}}}-->
/% @@ This will be used in the second half of the semester @@ %/
Proxmox hypervisor:  https://lab.ncs205.net/

Subnet: 192.168.12.0/24
Gateway: 192.168.12.1
DNS: 192.168.12.10

!! IP Address Assignments:
| !Last octet | !Host Name | !Description |
| n | test |Testing|
| n+1 | www |Web Server|
| n+2 | core |DNS, syslog, ntp|
| n+3 | files |Storage Server|
| n+4 |>| Unused |
| n+5 |>|~|
| n+6 |>|~|
| n+7 |>|~|

* The fully-qualified hostname for your VM is //host//.//username//.ncs205.net where //host// is in the second column in the table above.
* Your VM IP addresses should be in the form 192.168.12.//n// where //n// is the first IP address you have been assigned in the table below.  Increment the value of //n// as necessary for additional ~VMs.  
** Do not deviate from the provided IP addresses.  These IP addresses will be checked for grading.  If you use other ~IPs you will not receive credit for the labs and may conflict with other students.

| !Start IP | !Username |
| 24 | merantn |
| 32 | betrusca |
| 40 | borysyy |
| 48 | casabob |
| 56 | chaputf |
| 64 | dibblec |
| 72 | fernanjp |
| 80 | filkinb |
| 88 | gottlep |
| 96 | gregusc |
| 104 | hammat |
| 112 | hewittb |
| 120 | huseinm |
| 128 | hwangpkj |
| 136 | jodwaym |
| 144 | kolodzt |
| 152 | louief |
| 160 | masono |
| 168 | mastron |
| 176 | mustafb |
| 184 | puigj |
| 192 | shipleh |
| 200 | woodd6 |
| 208 | woodwom |


! Lab network topology

[img[img/topo.png]]

/% awk -v ip=32 '{print "| " ip " | " $1 " |"; ip+=8}' user2009.txt %/
! Introduction to NCS 205

!!! Expectations:
Mostly outlined in the [[syllabus|syllabus/NCS205Syllabus2401.pdf]], but to recap:

* ''Honesty & Integrity'' - Cheating generally results in a failing ''course'' grade.
** This course is in a security program.  If you cannot be trusted, you do not belong here.
* ''Motivation & practice'' - You must be motivated to practice the work in order to pick up the material.
** Here's a good article discussing [[productive struggle|http://maateachingtidbits.blogspot.com/2017/11/the-role-of-failure-and-struggle-in.html]] that roughly outlines how I'm teaching this course.
* ''Graded Homework'' - Almost everything will be graded.
* ''Don't fall behind'' - Else the workload will bury you.
** Please let me know early if you're starting to run into trouble.

This class will also use Linux as a vehicle for reinforcing good soft skills.  You will be expected to:
* Provide clear and thorough explanations
* Ask questions when help is needed
* Be an active participant in your learning
** Using a whitewater rafting analogy - I'll be the guide and you're our paddlers.  I'll chart the path, but you need to get us there.

!!! Class Resources
* Required Textbooks:  
** First half of the semester - [[The Linux Command Line|http://linuxcommand.org/tlcl.php]] (free download)
** Second half of the semester - [[Linux Administration: A Beginners Guide, Eighth Edition|https://www.mhebooklibrary.com/doi/book/10.1036/9781260441710]]
* Class website:  https://www.ncs205.net/
** The class website will be our primary resource for course content
** Each content page is generally divided into three sections:  
### the content assignment (what to read or watch),
### my notes about the content
### the deliverables for that content
* Brightspace 
** [[Brightspace|https://mylearning.suny.edu/d2l/login]] will be used only for announcements and tracking grades.
* Discord will be used for regular class communication
** An invite to our server has been posted to Brightspace
** Discord participation will be [[evaluated as well|Class Participation]].

!!! Class Cadence
* A week's worth of new material will be posted to the class website Sunday evening in two parts.  
** Unless stated otherwise, part 1 assignments will be due by end of day Wednesday
** Part 2 assignments will be due by end of day Saturday.
** An [[assignment calendar|Calendar]] can be found on our class website in the menu bar above.
* Carnegie credit hour
** A Carnegie credit hour is defined as 50 minutes of lecture and 2 hours of prep/homework for each traditional course credit hour 
** This requirement is defined in [[SUNY Policy|https://www.suny.edu/sunypp/documents.cfm?doc_id=168]]
** Translated to our online class, this means we are expected to perform approximately 12 hours of instructional activity per week
** This is hard to gauge in an online class.  Please let me know if you feel we are regularly exceeding that.

!!! Extra Help

Several options exist if you are stuck and would like some extra help.  

* Post your question or problem to the class Discord server
** Be sure to post to the channel for the week the material was assigned
* Ad-hoc online meetings via Zoom.  Let me know if you'd like to schedule one.
* Regularly scheduled weekly Zoom meetings.  We can offer these if there is interest
* Weekly office hours.  We can schedule these once the semester gets settled in.


{{Warning{
This class will test your skills as a student; ''being a good student will be important in order to successfully complete this course''.  This will not be one where you can do the bare minimum and skate by with a good grade.  Good ''time management'' and ''study skills'' will be critical.  ''If you neglect the material you will likely not successfully complete the course.''

Everything we do this semester will look back on previous work. If you're rushing through and not retaining it, you will surely pay for it later.  Having a keen eye for detail, paying attention to the directions, and taking the time to practice and retain the material will make for a much smoother semester.
}}}


!! Accessing the class shell server

The class shell server is an always-on system we will connect to in order to practice the class assignments and submit homework.  There are two ways we will access the system - from the command line for entering commands or through a file transfer utility for uploading files.

!!! Connection Tools
* Access the shell (command line) with either:
** [[PuTTY for Windows|http://www.chiark.greenend.org.uk/~sgtatham/putty/download.html]] (Download the latest version of the 64-bit MSI installer)
** [[PuTTY for Mac|https://www.ssh.com/ssh/putty/mac/]]
** Mac, Linux, or Windows Subsystem for Linux:  You can also use the command-line SSH.  Launch your terminal and run the command {{Command{ssh -p 2205 //username//@lab.ncs205.net}}}.
* Transfer files between the server and your local system:
** Windows: [[WinSCP|https://winscp.net/eng/download.php]]
** Mac: scp/sftp on the command line or any SFTP client like [[FileZilla|https://filezilla-project.org/]]
* Portable versions exist for these applications.  This is convenient if you are using campus ~PCs that do not have the tools installed.  You may download and run them from a flash drive or your home directory in the lab.
** [[PuTTY|http://www.chiark.greenend.org.uk/~sgtatham/putty/download.html]] - Download and run putty.exe
** [[WinSCP|https://winscp.net/eng/download.php]]: Download the portable package

!!! Logging in
* Use one of the tools above to log in to ''lab.ncs205.net'' on port ''2205''
* Log in with your campus username
* Your initial password will be posted to the Week 1 forums in the Brightspace Discussion Board.
* Change your password after logging in.
** Run the {{Command{passwd}}} command to change your password
** ''Any accounts still using the default password will be locked on Saturday, January 21.''

This short video will walk you through downloading ~PuTTY, a unix remote access client, and connecting to the system for command line access.  Your initial password for the server can be found in the Brightspace discussion board.

Video:  [[Accessing the shell server]]
/% Download: ~PuTTY - [[installer|https://the.earth.li/~sgtatham/putty/latest/w64/putty-64bit-0.70-installer.msi]] or [[exe|http://the.earth.li/~sgtatham/putty/latest/win64/putty.exe]] %/

!! Working on the command line

Console
* Console is considered the interface with a system as though you are physically sitting at its monitor and keyboard.  This lets us interact with the system before the operating system loads
* A virtual console is available for ~VMs or through a lights-out management utility such as a Dell iDRAC.  
Remote access
* Remote access to a Linux system such as our class shell server can also be obtained through a remote access service like SSH (Secure ~SHell).  
* SSH is the standard command-line remote access interface for Unix/Linux systems.  It allows us to interact via a SSH client, much like how your web browser is a client to a web server.
* Our class shell server is a traditional timeshare server.  It's always available; we don't power it off.
Shells
* The shell is our interface with the command line.  It's a program that takes input from the user, passes it on to the system to process, and returns any output back to you.


!!! Navigating our lab server's filesystem:
* Directory paths
** Directory paths enable us to have a hierarchy of directories and keep our files organized
** Similar to the command line on Windows
** The path separator is a forward slash on Unix/Linux systems - {{File{''/''}}}
** Change directories with the {{Command{cd}}} command
*** eg:  {{Command{cd /opt/pub/ncs205/submit}}}
** List the contents of the directory with the {{Command{ls}}} command
** List the contents of the directory in long format with the {{Command{ls -l}}} command
*** Displaying the contents of a directory in long format is always preferred so you can easily see all information about the files
* Some directories of interest:
** {{File{/home/}}} - User home directories ''typically'' reside below this directory tree.  This is just a standard convention - home directories can be anywhere on the system.
*** A user home directory is a space where each user can save their files.
** {{File{/opt/pub/ncs205/submit/}}} - Lab/Homework assignments are uploaded to this directory
** {{File{/opt/pub/ncs205/returned/}}} - Graded homework assignments are stored in this directory for you to download
** {{File{/opt/pub/ncs205/data/}}} - Data files for labs are stored here
** {{File{/tmp/}}} - Temporary scratch space

!!! Executing commands
* Structure of a command string:
** {{Command{''command'' [options] [arguments]}}}
** options and arguments may be optional or required depending on the command
** In Unix command documentation, an item within the square brackets is an optional component.  Some commands will also require arguments and some will not.
*** The documentation for each command will outline its requirements.

* Viewing files
** Display a file: {{Command{cat //filename//}}}
** Display a file one page at a time:  {{Command{less //filename//}}}
** Edit a text file:  {{Command{nano //filename//}}}  ''-or-''  {{Command{vi //filename//}}}


!!! Other useful commands
* The UNIX manual - {{Command{man}}}
** If you want to learn more about a command, check out its manpage.
** For example, {{Command{man ls}}} will display detail about the {{Command{ls}}} command

!!! Using Discord
* Class discussion in Discord will make up 10% of your total course grade this semester.  
* How this part is graded is discussed in the [[Class Participation]] page
* [[Using Discord]] contains some tips for how to post

!!! Working efficiently
* View your previously executed commands with the {{Command{history}}} command
* Tab completion - Press the tab key to autocomplete commands or file paths
* Up / Down arrows - search up and down through your command history
* Page Up / Page Down - Use these keys to search through your command history for the last commands which begin with a given string
** For example, typing {{Command{ls}}} and then pressing Page Up will jump you to the last command string you executed which started with {{Command{ls}}}.
* The [[Linux Shortcuts]] page will have some more useful items.


!! Submitting homework assignments

See the [[Lab Assignments]] page for details 
! Material 
!! Read:
* Chapter 1 in [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]]


! Operating system basics 

!! Major components
*Kernel - Main control program of the computer:  process control and resource management.
*File system - Organize location of items on the system.  Everything is a file.  
*Shell - Main interaction between the user and the system.  The shell receives and interprets commands entered by users, passes them on to the kernel to execute.

!! Secondary components:
Not part of the core OS, but necessary to do useful things with the system
* Basic Utilities - Many are from the GNU project 
** System - Tools an administrator would use: mount, dd, fsck
** Userland - Tools regular users would use:  file system tools (cd, ls, mkdir), text editors (vi, pico), filters (grep, cut, sed), process tools (ps, kill) 
* Development environment - compilers
* System Documentation - man pages, docs, etc.
* Larger Applications - Graphical interface, word processor, image viewer, etc
* Specialized utilities (Like the tools that come with a distro like Kali)

!! UNIX is an OS that supports:
* Multi-tasking - foreground and background processes
* ~Multi-User - Multiple users may access the system at the same time.
** Privilege separation - administrators (root) and regular users, with regular users able to be isolated from each other.
* Time sharing - Share computing resources among many users
* Portability - Can be run on different types of hardware systems/architectures (~PCs, servers, game systems, phones, embedded, etc)

!! Types of Unix

* Started at Bell Labs
** 1969 Bell labs - AT&T Unics (Uniplexed Information & Computing Service)
** Unics became UNIX when multiuser support was added
** 1956 antitrust agreement (consent decree) with the government,  AT&T could not go into the computer business and charge for software.  They had to give licenses to anyone who asked.
** Sources distributed to researchers at universities allowing them to modify and extend.
** Unix editions 1 - 10 numbered from the edition of the printed manual.  Mostly for research and development purposes.

* 1978 BSD UNIX (Berkeley Software Distribution)
** Grad students at Berkeley modified and extended the AT&T code
** Bundled and released their addons for use at other universities.
** Early development slowed due to licensing issues with AT&T
** Would eventually fork into three projects: ~FreeBSD (1993), ~NetBSD (1993), & ~OpenBSD (1995)

* GNU project & the origins of Linux:
** GNU Project (GNU's not Unix) - Richard Stallman (1983) - Wanted to create a totally free OS. They started with the utilities.
*** FSF : Free Software Foundation (1985)
*** At MIT, saw many MIT software developers get picked off by companies and sign restrictive non-disclosure agreements.  
*** Many companies were now restricting access to Unix source code to limit modification and redistribution, facilitate hardware lock-in and push towards expanded commercialization.
*** Software should be free to run, change, copy and modify so users are the ones in control, free from corporate control, and better software develops - GNU license
*** Brought a philosophy of freedom (freedom (speech), not price (beer) )
*** FSF kernel (GNU/Hurd) taking too long to develop, though all other components (the utilities) were complete.
** 1991 - Linux Kernel:
** Linus Torvalds, Finnish grad student, started working on a kernel for fun after getting impatient for a totally free kernel to work with due to all of the legal battles
*** Minix good for academics, but not allowed for professional use
*** Minix still required a fee and had a restrictive license
*** BSD still somewhat encumbered by AT&T license 
*** BSD legal problems stalling development
*** Linux only had a kernel, not a complete operating system.

* Linux distributions:
** GNU Project had utilities but no kernel.  Linus Torvalds had a kernel but no utilities.
** Linus provided the kernel (Linux kernel) to accompany FSF GNU utilities & components to make a Linux OS
** Different distributions (Fedora, Gentoo, Debian, Ubuntu, etc) combine the Linux kernel, FSF utilities, and other applications in different ways and focus on different types of users.
** Success due to freedom - many other programmers able to contribute code and ideas.

* Commercial: (AIX, HPUX)
** Several commercial Unix distributions exist and are only found in large enterprises.  These were more popular decades ago and have lost ground to Linux.

A good article about the history of Unix/Linux:  [[Did Linux Kill Commercial Unix|https://www.howtogeek.com/440147/did-linux-kill-commercial-unix/]]


! Interacting with the system

There are two ways to interact with a system:  through a graphical interface (GUI) or the command line (CLI).  Most of our work this semester will be conducted through the CLI.

!! Graphical User Interface (GUI)
* Desktop environment on top of OS; just another application
* Examples of GUI Desktop managers:
** Gnome
** KDE
** XFCE
* Much better for multitasking
* How to use it
**~Alt-F2 - Run a command
**~CTRL-ALT-Arrows - Change virtual desktops
**~CTRL-ALT-BKSP - Restart the window manager (if enabled)
** Navigating the menus - Much like what you're used to on either Windows or Mac
** Mouse: Highlight to copy, middle button to paste
*** This is the standard Unix/Linux way to copy/paste.  In putty, highlighting text copies it to the clipboard and clicking the right mouse button will paste to the terminal

!! Command Line Interface (CLI)
* Can be accessed from within the GUI, eg: the terminal program
* Or Console, which is accessed when you're sitting down at the keyboard and monitor on a system not running a graphical environment
* Virtual Console (~CTRL-ALT-F[2-9]).  Unix systems run many virtual consoles which can be accessed to run other tasks.
* Access remotely, such as via SSH.  Will be accessing our class shell server remotely to complete our work.

* Unix/Linux is primarily a Multi-user environment.  Many users can easily log in concurrently and work simultaneously.
** About accounts:
*** Home directory - Every user has a home directory where they can store files
*** User ID - The ID number assigned to your account.  It's these numbers which identify you as a user
*** Group ID - Users may belong to groups for shared resources.  Everyone in this class is a member of the ncs205 group and can access this class's resources
*** Who am I - List information about your user account:  {{Command{id}}}
*** Who are you - List information about other user's accounts:  {{Command{id //username//}}}
*** Who is connected:  {{Command{w}}} or {{Command{who}}}


!! The Shell

The shell is our command processor that provides:
* An interpreter - it reads and interprets commands from the user, 
** displays the shell prompt and waits for input (Case matters here!)
** user interface for entering and processing commands from the user
** works with the kernel to execute them
* Programming interface
** script interpreter for executing shell scripts
** a shell script is just a collection of commands

!! Different Shells

Different shells for different things:  bourne, bash, csh, tcsh, korn

[>img[img/shell.jpg]] 
The Shell is what users interact with on the command line.  It receives and interprets commands. 

[[Two main families|img/shells.jpg]] - bourne and ~C-Shell

Thompson shell, original unix shell, ends with AT&T 6th edition and replaced by the modern branches:
* Bourne Shell ({{Command{sh}}}) 
**written to replace limited abilities of original shell
**Oldest and most primitive
**Korn shell ({{Command{ksh}}}) - Closed shell from Bell Labs
***Built to be a vast improvement over the bourne shell
***Adopted in future editions of AT&T Unix (8-10th editions)
***Became popular with commercial users as a higher end, more powerful shell, especially as a programming language
**Bash ({{Command{bash}}}) - FSF - ''B''ourne ''a''gain ''sh''ell
***Extends bourne shell while being free to distribute 
***Free software, community supported, part of the GNU toolset.
*~C-Shell ({{Command{csh}}}) - Created by Bill Joy for the Berkeley Software Distribution (BSD) unix.
** Based on C Programming language.  scripting commands are based on C statements
** BSD License, couldn't distribute freely
** TCSH ({{Command{tcsh}}}) -
*** Enhancement of the C-shell while being free from licenses
*** In public domain for academic users

Which to use:
Split into three camps:  {{Command{bash}}} for Linux, {{Command{tcsh}}} in BSD branch, and {{Command{ksh}}} for commercial distributions (IBM AIX and ~HP-UX)
What we'll be using this semester:
Interactive use: bash, since we're doing everything in Linux
Shell scripting:  bourne for portability/compatibility or bash for extended features.

We can see available shells on a system with:  {{Command{cat  /etc/shells}}}

The shell is just a regular program, so anyone can design their own shell.  You can also execute it by its command name to run a different shell.

!! Working with the shell

* Commands are entered at the shell prompt
*Syntax: {{Monospaced{''command_name'' [options] [arguments]}}}
** Command - what action to take
** Options - modify how the action is applied / how the command does its job
** Arguments - Provide additional info to the command (object identifiers, text strings, file names, etc)
** Some options can have arguments (option arguments) to provide additional information
*** For example, {{Command{mkdir -m 755 ~/open/}}} to create the directory named {{File{open}}} within your home directory with different starting permissions.  Here, the {{Monospaced{755}}} is an argument to the {{Monospaced{-m}}} option.
** The components in a command string must always be separated by some form of whitespace
*** The command string {{Command{ls -l /tmp/}}} is correct where all three options are properly separated by whitespace.  Whereas the command {{Command{ls-l/tmp/}}} is an invalid command that does not exist on the system.
** In documentation, brackets around a component show that component is optional and not required by the command.  Consider these two examples:
*** {{Monospaced{grep [OPTIONS] PATTERN [FILE...]}}} - only the //PATTERN// argument is required.
*** {{Monospaced{ls [OPTION]... [FILE]...}}} - the command may be executed without specifying any options or arguments
**** The ellipsis (three dots) denote additional items can be added.  For example, multiple files can be specified for the {{Command{grep}}} or {{Command{ls}}} commands.

** Example commands:  {{Command{ls}}}, {{Command{date}}}, {{Command{cal}}}, {{Command{who}}}
* default options  /  arguments 
** {{Command{date}}} - by default, show the current date and time.  Different options can be specified to alter the format the date is displayed in.
** {{Command{cal}}} - by default, show a calendar for the current month.  {{Command{cal 2022}}} will display the entire year.
** {{Command{cd}}} - by default, change directory to the user's home directory.  Specifying an argument will change to that directory instead.
* combining options
** several options can be combined together, for example: {{Command{ ls -lrt }}} to display the contents of the current directory in long listing format, sorted by modification date, with the most recently accessed files at the bottom.
* {{Monospaced{ - }}} vs {{Monospaced{ &#45;-word }}} options (eg: the {{Command{cut}}} command)
** Some options can be specified with a single dash and letter, eg {{Monospaced{ -d }}}
** Or with two dashes and a word, eg {{Monospaced{ &#45;-delimiter }}}
* Autocompletion - Enter the first few characters of a file or command and the shell will complete the rest.
** Tab key & ~CTRL-D
* command history
** The keyboard up/down arrows can be used to cycle through previous commands
** type first 2 letters and hit page-up to return to the last command which began with those characters
* Canceling a command with ~CTRL-C
* grouping commands with ;
** example: {{Command{date ; cal}}}

!! Shell customization files
* bourne shell family:
**.profile - commands used to customize the shell
*C-shell family
**.cshrc - Shell environment (run on both interactive and non-interactive shell)
**.login - Run on login, contains commands and variables
**.logout - Script that runs on logout


! Finding information / UNIX Documentation
[>img[https://imgs.xkcd.com/comics/rtfm.png][https://xkcd.com/293/]]
*Usage: {{Command{man [section] //command//}}} - the unix manual / man pages
**Navigation:
***moving up and down with space, f and b
***search down with /, up with ?, n for next match
***g to top of page, G to bottom of page
***q to quit 
***h for help

* Man page chapters:
**Synopsis - Overview of the command, listing options and requirements.  Optional items are contained within {{Monospaced{''[ brackets ]''}}}
**Description - Description of actions performed by the command and detail information about all of the supported options
**Examples - Examples of common usage	
**See Also - Other man pages to read for related information
**Check manpages for look, chmod, cut

*{{Command{man -k //keyword//}}} - search the unix manual

*Manual sections:
**1 - Commands
**2 - System Calls
**3 - Library Functions
**4 - Special Files
**5 - File Formats
**6 - Games
**7 - Misc Info
**8 - System administration

{{Note{''Note:'' When working with Unix documentation, items in ''[''brackets'']'' are optional.}}}

! Deliverables

!! Read Chapter 1 in [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]]
- Complete [[Lab 1|labs/lab1.pdf]]

''Warning:'' Do not complete these lab assignment ~PDFs within your web browser.  Download the files and open them in [[Acrobat Reader|https://get.adobe.com/reader/]] or a similar PDF document reader.  Web browsers do not save form input appropriately and your responses will likely be lost.

{{Warning{It's wise to preview the completed PDF document in Acrobat Reader to verify your responses before uploading to the class shell server.}}}

Be sure to read the instructions for submitting assignments and information about the labs in the [[Lab Assignments]] page.  Assignments will only be accepted if they are properly submitted according to the instructions.

! Material
!! Read: 
* Chapter 2 in [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]]


! UNIX Files

Navigating the filesystem is at the core of working with the unix command line.  Explore our shell server using the {{Command{cd}}}, {{Command{ls}}}, and {{Command{pwd}}} commands.  Files for our class and your labs can be found within the {{File{/opt/pub/ncs205/}}} directory.  Use the material introduced in this chapter to explore the filesystem on the shell server, especially the directories within {{File{/opt/pub/ncs205/}}}.

Everything on a Linux system is either a file or a process
* Defined: A file is anything that can be a source or destination of data.

!!File types:  (ls will show the type)
* Ordinary files (also called regular files) - items such as text files, programs, and images
** {{Command{file}}} command - This command can be used to display a file's type
*** eg:  {{Command{file /usr/bin/ls}}}
** {{Command{strings}}} command - This command can be used to extract ascii strings from a binary file
* Directory files - special files that contain lists of files and other directories.  How files are organized on the system.
** Standard conventions:  A file written with a trailing slash (eg: {{File{/opt/pub/ncs205/}}}) refers to a directory.
** Root directory - Highest level of the file system =  {{File{ / }}}
**Navigate directories with the {{Command{ cd }}} command
*** eg:  {{Command{ cd /opt/pub/ncs205/ }}}
*** The {{Command{ cd }}} command's default argument will change to your home directory
*** {{Command{cd -}}}  will change to the last directory you were in
** Display file metadata (eg, permissions and date information) on a directory with  {{Command{ls -ld}}}
*** Example: {{Command{ ls -ld /opt/pub/ncs205/ }}}
** Special directories contained within every directory on the system:
*** {{File{.}}} = Refers to the present working directory.  (The directory you are currently in)
*** {{File{..}}}  = Refers to the parent directory.  See warning note below.
** Your home directory.  A place for your files on the system.  Referred to with the shortcut symbol ({{File{~}}} or the variable $HOME)
*** Either use it alone to refer to your home directory, eg: {{Command{ls ~}}}
*** Or with a username to refer to another user's home directory, eg: {{Command{ls ~//username//}}}
** Relative & absolute path names
*** Absolute path - a path that starts from the root of the filesystem.  It will always begin with {{File{ / }}}, eg:  {{File{/opt/pub/ncs205/submit/}}}
*** Relative path - a path that starts from your current directory, eg:  {{File{ ncs205/submit}}} (notice the lack of a {{Monospaced{ / }}} at the beginning of this path).
** Working directory
*** The //current working directory// is the directory you are currently located in at the shell prompt.  
*** The command {{Command{pwd}}} (print working directory) will display the full path of your current working directory to the screen.  This helps keep track of where you are on the system.
** Obtain disk usage of a directory with the  {{Command{du}}}  command, eg:  {{Command{ du -sh ~ }}}
* Symbolic Links - special files that are pointers to other files
* Hardware devices - storage medium (hard drive, DVD drive, flash drives, etc), display, network cards, etc.
** Character devices - device that reads or writes data one character at a time
** Block device - device that reads or writes data one block at a time
* FIFO (aka named pipe) - Used for interprocess communication
* Socket - Used for network communication

{{Warning{''Note:'' The special directory {{File{..}}} refers to the ''parent directory'', not //previous directory//.  The word previous is ambiguous and could mean the last directory you were in.  The last directory you were in could be anywhere on the filesystem.  Referring to the special directory  {{File{..}}}  as //previous directory// will be considered incorrect. }}}

!! File and directory names
* Can be any sequence of ASCII characters up to 255 characters
* Start file names with an alphabetic character
* Try to avoid spaces.  Instead use dividers to separate parts of the name, such as {{Monospaced{ _ - : . }}}
* Use an extension that describes the file type
** For example, the {{Monospaced{.tgz}}} extension on the file name {{File{ homebackup_december.tgz }}} suggests this is a gzip-compressed tar archive.
* Files beginning with a dot are hidden from normal view
** These are typically configuration files, like {{File{.bash_profile}}} or {{File{.bashrc}}}
* Avoid special characters (shell metacharacters) in your file names, such as:    {{Monospaced{ & * \ | [ ] { } < > ( ) # ? ' " / ; ^ ! ~ % ` }}}
** All of these symbols mean something special to the shell.  Keep track of what they mean.  [[Shell Metacharacter Table|handouts/ShellMetacharacterTable.pdf]]
* If special characters were used, escape them with a {{Monospaced{ \ }}}
** Example:  {{Command{vi  commands\&examples.txt}}}
* Put quotes around your file name if spaces must be used
** Example:  {{Command{vi "Long File Name.txt"}}}
* We'll discuss escaping, quoting, and metacharacters in more detail later

!! Basic file manipulation

!!! Listing files - {{Command{ls}}}
The {{Command{ls}}} command will list the contents of a directory.  Extra options can be used to alter the default behavior of the {{Command{ls}}} command:
* {{Command{-a}}} - This option will include hidden files in the output
* {{Command{-l}}} - This option will display the output in //long listing// format.  Additional information about the files will be displayed.  Displaying your files with a long listing is preferred so you are presented with the additional detail.


! Deliverables

!! Read Chapter 2 in [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]]
- Complete [[Lab 2|labs/lab2.pdf]]
! Material:

Continue previous shell scripting work

! Assignments:

* Complete [[Lab 37|labs/lab37.pdf]], [[Lab 38|labs/lab38.pdf]] - Past due
* Complete [[Lab 39|labs/lab39.pdf]], [[Lab 40|labs/lab40.pdf]] - Due Wed, Mar 27
* Complete [[Lab 41|labs/lab41.pdf]], [[Lab 42|labs/lab42.pdf]] - Due Sat, Mar 30
! Material

!! Read Linux Administration - A Beginner's Guide:
* Chapter 12 until page 265 (tcpdump) - Good Networking background info
* Chapter 13 - Network Configuration

!! Watch
* Good networking background info, but not required: https://www.youtube.com/watch?v=fHgk7aDGn_4

! Notes

For the remainder of the semester we will transition from being users of a Unix system to administrators.  A virtual lab environment is available where everyone will be assigned a small collection of Unix servers to configure and manage.  

{{Note{
!!! Getting ahead of future problems:
# ''About 50% of the problems we will encounter will be due to typos''
** Leave little to chance and use copy/paste for long or complex commands or configurations
** Pay attention to what you are typing.  Some characters, like a 1 and an l, look alike.  Be sure you know which you're dealing with.
** Use your VM's console only to configure networking and bring it online.  After that, do everything through SSH
# ''About 25% of the problems later in the semester will be due to rushing through earlier material instead of taking the time to retain it''
** Everything we do this semester will look back on previous work.  If you're rushing through and not retaining it, you will surely pay for it later.
# ''About 20% of the problems will be due to not following the directions''
** Go slow and pay attention!  Each of the steps matter.  If you skip over important steps or ignore errors, don't expect things to work.
# ''< 5% of the problems will be due to genuine system errors''
** Having a keen eye for detail, paying attention to the directions, and taking the time to practice and retain the material will make for a much smoother semester.
}}}

!! Linux Basics

There are many different Linux distributions available.  The distro to choose is a combination of the system's purpose and personal preference.  

Examples of different Linux distributions are:
* Server - ~CentOS, ~ClearOS
* Desktop - Fedora, Mint 
* Dual (both desktop & server editions) - Ubuntu, Debian
* Build from source / minimal - Gentoo
* Special Purpose - Kali, Clonezilla, ~GParted

Obtaining a Linux distro
* Directly from the distro's website
* [[DistroWatch|http://distrowatch.com/]] - A site which tracks all available Linux distributions

Installation options
* Single OS
* Dual boot (eg: Dual boot between Windows and Linux)
* Virtualization (eg: ~VirtualBox)
* Live USB (Kali is a great option for this)


For our class, everyone will be assigned Linux virtual machines.  These class ~VMs have already had the OS installed from a common template.  We'll be using ~CentOS minimal, the same distribution used for our class shell server.  This is a bare-bones installation by default.  All other software will need to be installed.  This allows for a slim and nicely tuned system which only contains the components required for its function.

!! Bringing our class ~VMs Online

1. Establish a SSH tunnel for your web browser using the directions in the [[Tunnels & Proxies with SSH]] page.
2. Direct your web browser to https://lab.ncs205.net/ to work with your VM.
* This is a protected resource not accessible to the open internet.  You must tunnel your connection to reach it.
* Log in to the Proxmox web UI with the same credentials you used for the class shell server.
* Select your VM from the list on the left
* Click the Start button in the top right corner if your VM is currently powered down.
** The monitor icon next to your VM name should change from black to white when it is powered on
* Once your VM begins to boot, select the drop-down next to Console and pick xterm.js
** Once the console windows opens, you may need to press Enter to get to the login prompt.

!!! Set root password
Log in with the username ''root'' and set a password with the {{Command{passwd}}} command.  Do not forget this root password.
 - The user ''root'' is the standard administrative account.  This special user account has full access to manage the system and all files on it.
 - There is currently no root password set.  You should be able to log in without being prompted for one.
 - Without setting a root password you will not be able to log in remotely via SSH.


!! Basic networking

Our virtual lab environment is behind a single IP address.  The internal portion is utilizing a private IP space, the subnet 192.168.12.0/24.  This setup is much like a home network where your home systems all share a single public IP address and are behind a router.  This router protects the internal systems since none of them are directly accessible from the internet.  Since your ~VMs are all behind a router, you cannot SSH into any of them directly.  You'll first need to SSH into the class shell server and from there you can SSH into your VM.


!! Set IP addresses

Everyone has a block of 8 ~IPs to work with.  We have five things to configure to bring them fully online:  IP address, netmask, gateway, DNS, and host name

The table below contains the fourth octet of your ''starting'' IP address. Use this to assign to your first VM.  The first three octets are 192.168.12.

| !Start IP | !Username |
| 24 | merantn |
| 32 | betrusca |
| 40 | borysyy |
| 48 | casabob |
| 56 | chaputf |
| 64 | dibblec |
| 72 | fernanjp |
| 80 | filkinb |
| 88 | gottlep |
| 96 | gregusc |
| 104 | hammat |
| 112 | hewittb |
| 120 | huseinm |
| 128 | hwangpkj |
| 136 | jodwaym |
| 144 | kolodzt |
| 152 | louief |
| 160 | masono |
| 168 | mastron |
| 176 | mustafb |
| 184 | puigj |
| 192 | shipleh |
| 200 | woodd6 |
| 208 | woodwom |
/% awk -v ip=32 '{print "| " ip " | " $3 " |"; ip+=8}' user2009.txt %/
* This is a ~Class-C subnet with a /24 CIDR mask.  Your netmask will be 255.255.255.0.
* The default gateway for these systems, the next hop towards the internet, is 192.168.12.1
* Our DNS resolver is at 192.168.12.10

!!! Manually apply static IP address immediately:

There are two ways to manually apply an IP address.  The old way with the old utilities and the new way most newer distributions are utilizing.  Our systems must be configured with the new way; they do not come with the old tools by default.  It is generally easier to bring your systems online manually using the virtual console and then SSH into them to complete the configuration.  You can also install nano if you're not yet comfortable with vi once the networking is configured.

!!!! The old way:
* Now requires the ''net-tools'' package on systemd-enabled installations
** The {{Command{ ifconfig }}} and {{Command{ route }}} commands are no longer installed by default
* Access a root prompt
* Set the ip address
** {{Command{ifconfig eth0 inet 192.168.12.''x'' netmask 255.255.255.0}}}
** Test it:  {{Command{ping 192.168.12.1}}}
** But we can't yet leave our local network:  {{Command{ping 1.1.1.1}}}
* Set the default route
** {{Command{route add default gw 192.168.12.1}}}
** Test it:  {{Command{ping 1.1.1.1}}}
* Set the system host name:
** {{Command{hostname test.//username//.ncs205.net}}}
** Be sure to replace ''//username//'' with your actual username in the above command.  Do the same wherever you see //username// in italics.
* Test by reinvoking the shell by executing {{Command{bash}}}


!!!! The new way using the {{Command{ip}}} command:
* Log in and access a root prompt
* Ensure the interface is up:
** {{Command{ ip link set eth0 up }}}
* Set the ip address
** {{Command{ ip addr add 192.168.12.''x''/24 dev eth0 }}}
** Test it:  {{Command{ping 192.168.12.1}}}
** But we can't yet leave our local network:  {{Command{ping 1.1.1.1}}}  (This should fail)
* Set the default route
** {{Command{ ip route add default via 192.168.12.1 }}}
** Test it:  {{Command{ping 1.1.1.1}}}  (This should now work)


!!!! Configure DNS & host name

* Configure DNS:
** DNS is not yet configured so DNS resolution cannot yet take place.  Attempts to ping a system by its host name should fail.
{{{
[root@localhost ~]# ping google.com
ping: unknown host google.com
}}}
** Add the following line to {{File{/etc/resolv.conf}}} to specify the DNS server to use for mappings between hostname and IP address.  
*** ''nameserver 192.168.12.10''
** Test it:  {{Command{ ping www.google.com }}} 


* Execute this command to set the system host name immediately:
** {{Command{hostname test.//username//.ncs205.net}}}
** Don't forget to replace ''//username//'' with your actual username
* Verify with {{Command{ hostnamectl }}}
* Test by reinvoking the shell:  {{Command{ bash }}}
* Edit the file {{File{/etc/hostname}}} so it contains the system hostname.  This file will be used to set the hostname on boot.
{{{
[root@test ~]# cat /etc/hostname
test.merantn.ncs205.net
}}}


!!!! Test connectivity by accessing your VM via the network

* Open putty or your SSH client and connect to our class shell server:  ''lab.ncs205.net''
* From the class shell server, connect to your VM via SSH:  {{Command{ssh 192.168.12.''x'' -l root}}}
** Use the root password you just set

{{Warning{''Warning:''  The {{Monospaced{''-l''}}} above in the {{Command{ssh}}} command string is a dash followed by a lowercase letter {{Monospaced{''l''}}}, not the number {{Monospaced{''1''}}}.  Be sure you can spot the difference between these two characters.  It's subtle, but they are different.}}}

{{Note{''Note:'' You'll likely see a similar warning the first time you SSH into a server:
<<<
{{Monospaced{
[nick@shell ~]$ ''ssh 192.168.12.24''
The authenticity of host '192.168.12.24 (192.168.12.24)' can't be established.
ECDSA key fingerprint is ~SHA256:fnSqW1mfXsFRg/i9XMqQ4/l3FoEYKX1BteuR7FgDXlc.
ECDSA key fingerprint is ~MD5:7f:07:b8:32:2c:c8:94:af:c2:66:8d:6e:3c:b6:08:2d.
Are you sure you want to continue connecting (yes/no)? ''yes''
Warning: Permanently added '192.168.12.24' (ECDSA) to the list of known hosts.}}}
<<<
This warning means the destination server is unknown and untrusted because the key isn't recognized.  The SSH client is giving you an opportunity to validate the remote host to ensure it is the correct system.  This error is expected if it's the first time you're connecting.  But if it appears spontaneously, some paranoia may be warranted and you should verify the key fingerprint over another channel.  If it was an impostor, that impostor would be able to intercept your credentials and other communication with the remote host.

You'll need to answer {{Monospaced{''yes''}}} to the question in order to proceed.  The host fingerprint will then be saved to your system within {{File{~/.ssh/known_hosts}}} so you're not prompted again in the future.
}}}

!!! Modify networking configuration files:

The {{Command{ ip }}} commands we just used will cause these changes to take effect only for the current boot instance of the system.  These settings will be lost once the system reboots.  We need to edit the appropriate configuration files so these settings will be applied on system startup.

!!!! Edit the file {{File{ /etc/sysconfig/network-scripts/ifcfg-eth0 }}}
These configuration options apply to the interface

# Ensure the ''HWADDR'' line is commented out (if it exists).
# Change the ''BOOTPROTO'' option from ''dhcp'' to ''static''
# Change the ''ONBOOT'' option from ''no'' to ''yes''
# Add the following lines:
<<<
IPADDR=192.168.12.''x''
NETMASK=255.255.255.0
GATEWAY=192.168.12.1
~DNS1=192.168.12.10
<<<
Replace ''x'' with the IP address assigned to your VM


!!!! Edit the file {{File{ /etc/hostname }}}
# Change the current contents to:  ''test.//username//.ncs205.net''
# The name ''test'' is only for this first VM.  A different host name will be used for future ~VMs.
# Don't forget to change ''//username//'' to your actual username


!!!! Add a line to {{File{ /etc/hosts }}} which resembles the following:

{{{
192.168.12.24         test.merantn.ncs205.net test
}}}
Replace the last octet of the above IP address with yours and replace my username with yours.


!!! Switch back to console and test


!!!! Restart networking services
This command will restart networking services on your system, activating the new settings to ensure they were correct.

{{Command{systemctl restart network}}}


!!!! Check your settings
Verify your configuration with the {{Command{ip addr}}} and {{Command{ip route}}} commands.  

The output of {{Command{ip addr}}} should resemble:
{{{
[root@test ~]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether ea:d3:f4:ee:bd:33 brd ff:ff:ff:ff:ff:ff
    inet 192.168.12.24/24 brd 192.168.12.255 scope global eth0
       valid_lft forever preferred_lft forever
    inet6 fe80::e8d3:f4ff:feee:bd33/64 scope link
       valid_lft forever preferred_lft forever
}}}
The last octet in the IP address above is unique to each system.  .24 is used here, your value must be different.

The output of {{Command{ip route}}} must minimally resemble the following lines.  Additional lines may be present.  
{{{
[root@test ~]# ip route
default via 192.168.12.1 dev eth0
192.168.12.0/24 dev eth0  proto kernel  scope link  src 192.168.12.24  metric 100
}}}

Hostname verification should resemble:

{{{
[root@localhost ~]# hostname
test.merantn.ncs205.net
}}}


!!!! Verify network connectivity
You should now be able to ping the default gateway for our test network by its IP address and google by its hostname.
{{{
[root@localhost ~]# ping 192.168.12.1
PING 192.168.12.1 56(84) bytes of data.
64 bytes from 192.168.12.1: icmp_seq=1 ttl=64 time=1.45 ms
64 bytes from 192.168.12.1: icmp_seq=2 ttl=64 time=1.23 ms
64 bytes from 192.168.12.1: icmp_seq=3 ttl=64 time=1.23 ms
^C
--- 192.168.12.1 ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 2399ms
rtt min/avg/max/mdev = 1.235/1.308/1.454/0.110 ms

[root@test ~]# ping www.google.com
PING www.google.com (172.217.3.100): 56 data bytes
64 bytes from 172.217.3.100: icmp_seq=0 ttl=57 time=2.147 ms
64 bytes from 172.217.3.100: icmp_seq=1 ttl=57 time=1.434 ms
64 bytes from 172.217.3.100: icmp_seq=2 ttl=57 time=1.266 ms
^C
--- www.google.com ping statistics ---
3 packets transmitted, 3 packets received, 0.0% packet loss
round-trip min/avg/max/stddev = 1.266/1.616/2.147/0.382 ms
}}}


!!!! Remote connections with SSH

The configuration of our virtual lab network will not allow direct outside connections to virtual machines.  You must first connect to the lab SSH jumphost.  

In this example I'm connecting to the class shell server from my home Unix system and then my test VM.  You can also use putty or a similar tool to first connect to the class shell server.

{{{
nick@trillian:~>ssh lab.ncs205.net -l merantn
Last login: Sat Mar 14 13:38:20 2020 from hoot
[merantn@shell ~]$


[merantn@shell ~]$ssh 192.168.12.24 -l root
Warning: Permanently added '192.168.12.24' (RSA) to the list of known hosts.
root@192.168.12.24's password: 
Last login: Sat Mar 21 23:35:38 2020
[root@localhost ~]#
}}}

{{Note{''Note:''   The {{Monospaced{''-l''}}} above in the {{Command{ssh}}} command string is a dash followed by a lowercase letter {{Monospaced{''l''}}}, not the number {{Monospaced{''1''}}}.}}}


! Our class infrastructure

None of this will be tested, but setting up our course infrastructure is related to the material we're covering so describing the setup seems prudent.  Here's how it all works under the hood.

!! Original class shell server

Our original class shell server was a VM running at [[DigitalOcean|https://www.digitalocean.com/]].  They are an easy to use yet low-cost cloud VM provider and a great choice if you need to stand up a short-term VM for a project.  The old VM cost $5 per month to run.  Most of my personal infrastructure is here.

After creating an account and logging into the dashboard, you're given an option to create ~VMs (DO calls them Droplets) with several operating systems to choose from.  Pick one and your new droplet will be available for use in a few minutes.


!! New class shell server and student ~VMs

~DigitalOcean won't scale very well for the second half of our course where each student will be provided 5 ~VMs. We'll also need a few for infrastructure.  For this half, I rented an entire server from a [[Hetzner auction|https://www.hetzner.com/sb]] for about 40&euro; per month.  [[Hetzner|https://www.hetzner.com/]] is a German cloud provider and their auction is the cheapest bare-metal option I've found.  The cheapest auction server is at 22&euro; per month, but we'll need more resources than the cheaper ones have.

The server I picked has an 8-core AMD Ryzen 7 1700X CPU with 128gb of RAM and two 1tb datacenter ~NVMe drives.  I expect we'll be running about 125 ~VMs total, so we'll need a lot of RAM.  SSD & ~NVMe drives are significantly faster than spindle, so they should speed things up for us.  With so many ~VMs, we may end up stressing that 8-core CPU though - that's the resource I'm most worried about.

They have an out of band rescue environment that you can SSH into for recovering a server or installing a new OS.  Once they turned the server over to me, I connected to their rescue environment and instructed it to install Debian Linux (version 11) on the server.  It rebooted after the installation was complete and I was able to SSH into our new class server.

Next came the hypervisor so everyone can have their own ~VMs.  I use [[Proxmox|https://www.proxmox.com/en/]] for everything.  This can be installed either directly from its own ISO or [[on top of an existing Debian Linux installation|https://pve.proxmox.com/wiki/Install_Proxmox_VE_on_Debian_11_Bullseye]].  

I only have a single IP address for this new server, so the Proxmox host is also functioning as a router to handle traffic for an internal subnet that will be used for our class ~VMs.  One of the ~VMs will be used as our new class shell server.

After the new class shell server came online, I installed the packages we'll need for our material, copied everyone's home directories & {{File{/opt/pub/ncs205/}}}, and copied everyone's user accounts.  Unless I forgot something, you shouldn't be able to tell the difference between the old and new shell servers.  At the end of next week, after we have some time to settle in to our new home, I'll power down the old class shell server at ~DigitalOcean, create a snapshot of it, and destroy the droplet.  Next semester I'll create a new droplet from this snapshot for the next class's shell server.

On the new Hetzer server, everything is a VM.  This makes it easy to back everything up at the end of the semester and cancel the server to reduce costs.  I can just copy those VM backups somewhere else and transfer them back to a new Hetzner server for the second half of the semester for the next class.  This way I only need to rent a $5 ~DigitalOcean VM for the first half of our course and then the more expensive Hetzner server for the second half.  I expect I'll need to transfer about 15gb from the Hetzner server in Germany to back up the ~VMs.  I can easily download them to my home file server, but uploading them next semester on a slow Spectrum connection will be painful.  Instead, we're using the B2 cloud storage at [[BackBlaze|https://www.backblaze.com/b2/cloud-storage.html]] to store the VM & server backups.  Their pricing is $0.005 per month per GB, so the VM image backups will only cost me about 10 cents per month until the next semester.


! Assignment

<<tiddler [[Lab 51 - Bring test and www online]]>>

//''Note:''// Virtual machine work begins at lab 51
* Labs 43 - 50 were skipped
! Material

!! Read:
* Linux Administration Chapter 7 - Booting and Shutting Down
** Note: Booting into single-user mode is good to know but the book's instructions are incomplete. Here's the new way for Redhat systems (which includes ~CentOS) - https://www.tecmint.com/boot-into-single-user-mode-in-centos-7/

* Linux Administration Chapter 9 - Core System Services
** Skip over //xinetd// and //logging// (pages 181 to 195) and continue with journald on page 195.


! Notes

!! General system startup

* The Boot process
** BIOS (Basic Input/Output System)
*** For motherboard and certain devices
** MBR (First block of the disk)
** Boot loader - Chooses the OS/Kernel and bootstraps the operating system
*** Grub - Grand Unified Boot loader - Standard Linux boot loader
*** Check out grub configs in /boot/grub
*** Use it to boot multiple kernels (such as after a kernel update) or multiple ~OSes
** kernel - /boot/vmlinuz* - loaded into memory and begins to execute
*** Press ESC to see boot messages while the system starts
*** device detection: probe system buses, inventory hardware, and load device driver modules
*** create kernel processes (those in brackets)
*** system becomes available for user processes once the kernel is loaded
** Initialization daemon - First user process, parent of all processes running on the system
*** init - old ~SystemV ~OSes
*** systemd - New method
*** executed by the kernel and responsible for starting other processes
**startup scripts - start system services
* Config files in {{File{/etc/}}}
** Most are single files for the service or resource
*** {{File{fstab}}} : tab = table - filesystem table
*** {{File{resolv.conf}}}
*** {{File{sysconfig}}} directory - extra system configuration files
** Some are multiple files
*** cron is a good example
*** {{File{crontab}}} - traditional cron config table
*** {{File{cron.d}}} - directory containing individual config files
*** {{File{ cron.{daily,monthly,weekly} }}}


!! Systemd
* A new standard init system
* Backward compatible with ~SystemV init
* Can start services in parallel, reducing system start times
* Everything is broken down into units.  
** Two primary unit types to be concerned with
*** service units - Manage a single service
*** target units - manage groups of services
*** {{Command{ systemctl list-units | grep service }}}
*** {{Command{ systemctl list-units | grep target }}}
* Service and target configuration files are stored in {{File{ /{etc,lib}/systemd/system }}}
** Use the {{File{/etc/systemd/system}}} path for custom configs or to override existing
** Stock configs are in {{File{ /lib/systemd/system }}}
** View a list with current state: {{Command{ systemctl list-unit-files &#45;-type=service }}}

Everything is managed by symlinks:
* runlevel.? targets are symlinked to their systemd equiv

<<<
[root@www system]# pwd
/lib/systemd/system
[root@www system]# ll runlevel*
lrwxrwxrwx. 1 root root 15 Oct 21 17:02 runlevel0.target -> poweroff.target
lrwxrwxrwx. 1 root root 13 Oct 21 17:02 runlevel1.target -> rescue.target
lrwxrwxrwx. 1 root root 17 Oct 21 17:02 runlevel2.target -> multi-user.target
lrwxrwxrwx. 1 root root 17 Oct 21 17:02 runlevel3.target -> multi-user.target
lrwxrwxrwx. 1 root root 17 Oct 21 17:02 runlevel4.target -> multi-user.target
lrwxrwxrwx. 1 root root 16 Oct 21 17:02 runlevel5.target -> graphical.target
lrwxrwxrwx. 1 root root 13 Oct 21 17:02 runlevel6.target -> reboot.target
<<<
* default.target symlinked to the desired default runlevel target
<<<
[root@www system]# ll default.target
lrwxrwxrwx. 1 root root 16 Oct 21 17:02 default.target -> graphical.target
<<<

| !~SysVinit Runlevel | !Systemd Target | !Description |
| 0 |runlevel0.target, poweroff.target|Halt the system|
| 1, s |runlevel1.target, rescue.target|Single user mode|
| 2, 4 |runlevel2.target, runlevel4.target, multi-user.target|User-defined/Site-specific runlevels. By default, identical to 3|
| 3 |runlevel3.target, multi-user.target|Multi-user, non-graphical. Users can usually login via multiple consoles or via the network|
| 5 |runlevel5.target, graphical.target|Multi-user, graphical. Usually has all the services of runlevel 3 plus a graphical login|
| 6 |runlevel6.target, reboot.target|Reboot|
| emergency |emergency.target|Emergency shell|


!!! Examining service configuration files

cat /lib/systemd/system/sshd.service
{{{
[Unit]
Description=OpenSSH server daemon
After=network.target sshd-keygen.service
Wants=sshd-keygen.service

[Service]
EnvironmentFile=/etc/sysconfig/sshd
ExecStart=/usr/sbin/sshd -D $OPTIONS
ExecReload=/bin/kill -HUP $MAINPID
KillMode=process
Restart=on-failure
RestartSec=42s

[Install]
WantedBy=multi-user.target
}}}
 - After: What this service depends on
 - Wants:  Additional units tied to this service
 - ~EnvironmentFile - Location to store environment variables or options to startup / shutdown commands
 - ~WantedBy: Runlevel target this service is associated with

Display services wanted by a runlevel target: {{Command{ systemctl show &#45;-property "Wants" multi-user.target }}}
Display services required by a runlevel target: {{Command{ systemctl show &#45;-property "Requires" multi-user.target }}}
Display services that want a particular child service: {{Command{ systemctl show &#45;-property "~WantedBy" sshd-keygen.service }}}

!!! Starting and Stopping

Example commands to start, stop, restart, and check the status of a service immediately:

* Start: {{Command{systemctl start firewalld.service}}}
* Stop: {{Command{systemctl stop firewalld.service}}}
* Check Status: {{Command{systemctl status firewalld.service}}}
* Restart {{Command{systemctl restart firewalld.service}}}
** Can also be used to reload configuration

Conditional restart - only restart if it's already running:  {{Command{ systemctl condrestart firewalld.service }}}

Reload a service to re-read configuration files:  {{Command{ systemctl reload sshd.service }}}

Persistent services - Those to start on system boot:
Newly installed services will not be configured automatically to start on system boot.  You will have to start them manually and set them to start on boot.
- Enable a service to start on boot, eg: {{Command{systemctl enable firewalld.service}}}
- Stop a service from starting on boot, eg: {{Command{systemctl disable firewalld.service}}}


!!! Checking status

{{Command{systemctl list-unit-files &#45;-type=service}}}
{{Command{systemctl status firewalld.service}}}


!!! Adding a new service

For example, adding a new service for Apache.  This is only necessary if you installed a service from source code instead of via package management.  If you install software from a package, that package will come with the necessary files for systemd to manage the service.  This is a good reference to see the internals in case something custom needs to be added or modified.

{{File{/etc/systemd/system/httpd.service}}} :
{{{
[Unit]
Description=Apache Webserver
After=network.target

[Service]
Type=forking
EnvironmentFile=/etc/sysconfig/httpd
ExecStart=/opt/work/apache/bin/httpd -k start $OPTIONS
ExecStop=/opt/work/apache/bin/httpd -k graceful-stop $OPTIONS
ExecReload=/opt/work/apache/bin/httpd -k graceful $OPTIONS

Restart=always

[Install]
WantedBy=multi-user.target
}}}
 - {{Command{man systemd.service}}} for more details.

* Create environment file:  {{Command{ touch /etc/sysconfig/httpd }}}
* Refresh service and target configuration files:  {{Command{ systemctl daemon-reload }}}
* Enable startup on boot:  {{Command{systemctl enable httpd.service}}}
** Symlink was created in multi-user.target.wants:  {{Command{ ll /etc/systemd/system/multi-user.target.wants/ }}}
* Start now: {{Command{systemctl start httpd.service}}}
** Review recent logs associated with the service:  {{Command{ journalctl -u httpd.service }}}


!! Single user mode
Single user mode is a method to access systems which cannot fully boot.  The boot process is changed to disable most system startup steps and services so the system can be accessed and recovered from the failure.

* How to access systems if problems occur during boot
** Boot from a live CD or recovery mode
** Single user mode
*** Change the grub timeout to 20 seconds so you'll have more time to catch it.  ~VMs often introduce delays accessing the console, thus it can be difficult catching the grub loader if it has a short timeout.
**** Edit the file {{File{/etc/default/grub}}} and add the line {{Monospaced{''~GRUB_TIMEOUT=20''}}} to the bottom of the file
**** Execute {{Command{grub2-mkconfig -o /boot/grub2/grub.cfg}}} to activate the changes
*** See https://www.tecmint.com/boot-into-single-user-mode-in-centos-7/ for instructions to boot in single user mode


Other useful commands:  
* {{Command{shutdown}}} - shutdown / power off the system with many options for doing so
* {{Command{halt}}} & {{Command{poweroff}}}
* {{Command{reboot}}}


! Assignment

Play around with accessing single user mode.  It's a handy thing to know how to do.  The second half of this week is mostly background info that we'll need later.

Be sure you're comfortable using the {{Command{systemctl}}} command to start, stop, and restart services.
! Material

!! Lab & VM notebook:
* Start keeping good notes of what you are doing with your ~VMs.
** The software installed today should be included.
** These notes will come in handy later when you need to repeat these steps on future ~VMs

!! Read - Linux Administration - A Beginner's Guide 
* Chapter 5 - Managing Software
* Our systems are running ~CentOS and will be using the {{Command{rpm}}} & {{Command{yum}}} package management commands.
* It's good to be familiar with the {{Command{rpm}}} command, but we'll mostly be using {{Command{yum}}}.
* We won't be using the DNF package manager.  It hasn't caught on yet like the book was suggesting it would.


! Notes

{{Note{As technology users, we should know by now how to submit usable problem reports.

If you have a problem, please send a report I can work with. I need details of the problem, what you tried, steps you took to diagnose it, documentation you reviewed, screenshots, logs, etc. If you send me something vague like "//X command doesn't work//" with no supporting details, there may not be much I can do for you and I will wait for you to follow up your message with meaningful information. 

The level of assistance I provide will be proportionate to your effort to troubleshoot and supply details. If you do nothing to troubleshoot and send me little information to work with, you should then expect that much effort put into a response.
}}}

!! Expanding our systems

!!! The yum package manager

Package management is one of the customized components of a linux distribution and differs between unix operating systems and linux distributions

The core components of a Linux distribution are:
* Linux kernel
* Base utilities (typically GNU tools)
** Many g* utilities are from the GNU project (eg: gcc)
** Stallman's GNU (GNU's not Unix) project, early 80's.  Wanted to create a totally free OS. Started with the utilities.
** Came from the Free Software Foundation and [[a philosophy of freedom|http://audio-video.gnu.org/video/TEDxGE2014_Stallman05_LQ.webm]] (freedom (speech), not price (beer) ).
** Software should be free to run, change, copy and modify so users are the ones in control, free from corporate control so better software develops - GNU license
** Differ somewhat from ~FreeBSD tools (sed is a good example)
* Package manager.

Extra (optional) components:
* Specialized utilities (Like the tools that come with a distro like Kali)
* X server / Window manager

Each distribution combines these components in different ways depending on their focus and goals.

Redhat based systems (including ~CentOS) use the RPM package format and rpm package system with the yum package management utility.

Other package management systems exist for other distros
 - apt  (Debian & Ubuntu)
 - portage  (Gentoo)
 - ports  (~FreeBSD)
 - ~DistroWatch [[Package management cheat sheet|http://distrowatch.com/dwres.php?resource=package-management]]

{{Command{rpm}}} - very basic utility
* It will mainly just install, update, or remove packages
* You will need to acquire the .rpm package file yourself or have a direct URL for it
** A .rpm file is a collection of pre-compiled binaries, configuration files, and support files for an application compiled for the target architecture.
* Conflicts and dependencies will need to be sorted out manually

{{Command{yum}}} - high level utility for package management
* will interact with repositories (collections of .rpm files) to obtain packages
* takes care of any conflicts
* will install necessary dependencies
* records what is installed and any changes made to the system to facilitate updates, package removal, or audit.

Different package repositories (repos)
* Repository configuration files are stored in {{File{/etc/yum.repos.d/}}}
* A repository is the central distribution point for our linux packages
* Typically, each distro has its own repository on the internet for base packages
* The repository creator determines which applications it contains
* Repos are mirrored for accessibility and speed.
* Other repositories offer additional packages

EPEL (Extra Packages for Enterprise Linux)
Distributed by the Fedora Project to offer a repository of add-on packages
See: https://fedoraproject.org/wiki/EPEL

Don't run this until we need the EPEL repo 
{{Command{yum install epel-release}}}

Other specialized repositories may exist:
 - HP repo for their utilities (raid utils)


!!! Yum package manager commands:

{{Command{yum}}}
<<<
The primary command for the yum package manager.  Run this by itself to see all sub-commands
<<<

{{Command{yum repolist}}}
<<<
Display the configured repositories
<<<

{{Command{yum check-update}}}
<<<
Check the repositories for any available updates and display the results, without applying any updates.
<<<

{{Command{yum update}}}
<<<
Check the repositories for any available updates.  After reviewing the results, the user will be prompted to apply them.
''A {{Command{reboot}}} will be required if a kernel update is included in the list.  Otherwise, only updated services may need to be restarted for the updates to take effect.''
<<<

!!!! yum cleanup:
{{Command{yum clean packages}}}
<<<
Remove cached packages after install is completed.
<<<

{{Command{yum clean metadata}}}
<<<
Remove the XML metadata cache
<<<

{{Command{yum clean dbcache}}}
<<<
Clean the yum ~SQLite database
<<<

{{Command{yum clean all}}}
<<<
Remove all cached yum content
<<<

{{Command{yum makecache}}}
<<<
Download and rebuild the repo metadata cache
<<<


{{Command{yum update //package_name//}}}
<<<
Update a single package
<<<

{{Command{yum provides "*/ssh"}}}
<<<
See which package provides the file named ssh. 
<<<

{{Command{yum info //package_name//}}}
<<<
Display information on the specified package
<<<

{{Command{yum install //package_name//}}}
<<<
Install a package from yum
<<<

{{Command{yum search //string//}}}
<<<
Search the repository for packages matching //string//
<<<

{{Command{yum deplist package_name}}}

{{Command{yum list installed}}}

{{Command{yum remove //package_name//}}}


!!!! Fixing damaged configuration files
It's a common occurrence that a configuration file is accidentally damaged during the course of completing these labs and a service will not load as a result.  This sequence of commands will demonstrate comparing the configuration file on the system to the default which was installed as part of the package.  This comparison should help identify such configuration errors.

* Show the package which installed a particular file:  {{Command{rpm -qf /etc/named.conf}}}
* Display changes made since the original file was installed:  {{Command{rpm -V bind}}}

{{{
[root@core ~]# rpm -qf /etc/named.conf
bind-9.11.4-26.P2.el7_9.4.x86_64

[root@core ~]# rpm -V bind-9.11.4-26.P2.el7_9.4.x86_64
S.5....T.  c /etc/named.conf
}}}

The following table explains the letters in the above output:
| !Code | !Description |
| S |file Size differs|
| M |Mode differs (includes permissions and file type)|
| 5 |~MD5 sum differs|
| D |Device major/minor number mismatch|
| L |readLink(2) path mismatch|
| U |User ownership differs|
| G |Group ownership differs|
| T |mTime differs|
| P |caPabilities differ|

* Rename the original configuration file:  {{Command{mv /etc/named.conf /tmp}}}
* Reinstall the package:  {{Command{yum reinstall bind}}}
** A configuration file will only be reinstalled from the package if it is missing from the expected location.
* Compare the default configuration file to the renamed copy:  {{Command{diff /etc/named.conf /tmp/named.conf}}}
** Lines beginning with &lt; are the version in the file listed as argument one
** Lines beginning with &gt; are the version in the file listed as argument two

{{{
[root@core ~]# diff /etc/named.conf /tmp/named.conf
13c13
<       listen-on port 53 { 127.0.0.1; };
---
>       listen-on port 53 { any; };
21c21,24
<       allow-query     { localhost; };
---
>       allow-query     { any; };
}}}

There's no error here; these changes are expected.  This only demonstrates the process.  But it should be helpful for identifying a damaged or missing line.

Once that damaged line is identified, either merge it into your backup in /tmp/ or repeat your modifications to the new clean copy.


!!!! Additional yum commands:

* Yum package groups:
** {{Command{yum grouplist}}}
** {{Command{yum groupinfo //group_name//}}}
** {{Command{yum groupinstall //group_name//}}}


* Yum plugins
** Extend the functionality of the yum package manager
** See available plugins with {{Command{yum search yum-plugin}}}

yum-plugin-security - Check currently installed software for security updates.  Requires a subscription.

{{Command{yum &#045;-security check-update}}}
{{Command{yum &#045;-security update}}}

{{Command{yum updateinfo list available}}}
{{Command{yum updateinfo list security all}}}

https://access.redhat.com/solutions/10021

yum-utils - Extra utilities for working with yum


We can install most required software using packages with yum:
* Keep a record of what is installed as we go.
* Get started with: 
** Install on all systems: man wget nc telnet bind-utils openssh-clients rsync bzip2


We can also install software directly from source archives
* Source archives are typically distributed as compressed tarballs
* Latest versions of software are not always available via package
* Building from source allows for additional customizations
* and a higher level of control
* Multiple versions of a program can easily be maintained on the same system by installing to different locations
** But you must keep them up to date (patched) and sort out any dependencies manually.

For this class, we'll only be installing software from packages via yum.

!! Install web server software

The following tasks must now be completed to bring your web server online.  Refer to the notes above and in last week's pages to identify the proper commands to achieve these goals.

Complete these tasks on your web server VM:
# Install the following packages:  httpd httpd-tools php telnet
# Set the ''httpd'' service to start on system startup
# Start the ''httpd'' service now

!! Verify the service with the {{Command{telnet}}} & {{Command{curl}}} commands

The {{Command{telnet}}}, {{Command{curl}}}, and {{Command{nc}}} commands are excellent tools for verifying that you're able to communicate with a host or a service.  These are great for troubleshooting and everyone should know how to use all three.

Here I'm using telnet to connect to my web server on localhost.  Run the telnet command to make a TCP connection and then begin speaking HTTP to the server.  The HTTP command {{Command{GET /}}} will return the website.  A lot of HTML will be returned, so I only have the first couple lines in the sample output below.

{{{
[root@www ~]# telnet localhost 80
Trying ::1...
telnet: connect to address ::1: Connection refused
Trying 127.0.0.1...
Connected to localhost.
Escape character is '^]'.
GET /
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd"><html><head>
<meta http-equiv="content-type" content="text/html; charset=UTF-8">
                <title>Apache HTTP Server Test Page powered by CentOS</title>
                <meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
(truncated)
}}}


The {{Command{curl}}} command is another great tool for verifying TCP services and is generally available on every unix system.  We'll add the {{Monospaced{-v}}} flag here for additional verbosity that's helpful for troubleshooting.  Again, I'm truncating the output.

{{{
[root@www ~]# curl -v http://localhost/
* About to connect() to localhost port 80 (#0)
*   Trying ::1...
* Connected to localhost (::1) port 80 (#0)
> GET / HTTP/1.1
> User-Agent: curl/7.29.0
> Host: localhost
> Accept: */*
>
< HTTP/1.1 200 OK
< Date: Tue, 30 Mar 2021 19:44:15 GMT
< Server: Apache/2.4.6 (CentOS) PHP/7.3.27
< Last-Modified: Tue, 30 Mar 2021 10:17:46 GMT
< ETag: "56-5bebe4f44343d"
< Accept-Ranges: bytes
< Content-Length: 86
< Content-Type: text/html; charset=UTF-8
<
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd"><html><head>
<meta http-equiv="content-type" content="text/html; charset=UTF-8">
                <title>Apache HTTP Server Test Page powered by CentOS</title>
                <meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
(truncated)

* Connection #0 to host localhost left intact
}}}

Once a successful connection has been made, view the apache log files to verify the connection.
 - Apache logs are located in {{File{/var/log/httpd/}}}

A log entry for a successful connection will resemble the following.  Note the ''200'' HTTP status code:
{{{
192.168.12.10 - - [05/Apr/2023:10:45:42 -0400] "GET / HTTP/1.1" 200 86 "-" "curl/7.29.0"
}}}

When we're starting out, you might instead see a ''403'' status code indicating an error:
{{{
127.0.0.1 - - [05/Apr/2023:10:59:26 -0400] "GET / HTTP/1.1" 403 4897 "-" "curl/7.29.0"
}}}

We can see why in the {{File{error_log}}} file:
{{{
[Wed Apr 05 10:59:26.052681 2023] [autoindex:error] [pid 1659] [client 127.0.0.1:44834] AH01276: Cannot serve directory /var/www/html/: No matching 
DirectoryIndex (index.html,index.php) found, and server-generated directory index forbidden by Options directive
}}}

The web server is missing the file for its default website, the {{File{index.html}}}, and cannot proceed.  We will fix this error in the //Web Server// lab below.


Now try to connect to your web server from your test VM using {{Command{telnet}}} or {{Command{curl}}}.  If you use telnet, don't forget to send the {{Command{GET /}}} command.

{{{
[root@test ~]# telnet 192.168.12.25 80
Trying 192.168.12.25...
Connected to 192.168.12.25.
Escape character is '^]'.
GET /
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd"><html><head>
<meta http-equiv="content-type" content="text/html; charset=UTF-8">
                <title>Apache HTTP Server Test Page powered by CentOS</title>
                <meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
( truncated )
}}}


Your web server is now online.  We'll work with it further in the 2nd half of this week's material.


! Additional Material

[[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]] - Chapter 14 (Package Management)


!! Useful commands:
* {{Command{wget}}} - great tool for downloading files from a web or FTP server
* {{Command{tar}}} - Standard linux archive tool.  Files are usually distributed or stored as tarballs (the Linux equivalent of Zip).  This tool will create or extract them
* {{Command{telnet}}} - Useful tool for testing TCP ports
* {{Command{curl}}} - Useful tool for testing TCP ports or downloading content from the web
* {{Command{apachectl}}} - Tool to manage an Apache server.  Good to know if exists, but we likely won't be using it.


!! References

Yum quick reference:  http://yum.baseurl.org/wiki/YumCommands.html


! Assignment

<<tiddler [[Lab 52 - VM updates & software installation]]>>
<<tiddler [[Lab 53 - Web Server]]>>
! Material

!! Reading

Read Linux Administration - A Beginner's Guide Chapter 19 (//Apache Web Server//)

* Keep in mind our ~DocumentRoot directory should now be {{File{/opt/work/htdocs}}}


! Notes

At this point the web servers should be online and serving a basic web site from our new ~DocumentRoot directory.  We'll now use that directory to set up a more sophisticated website.

!! Web Services 

There are many different web server options - 
* Apache - One of the most common web server software packages
** LAMP stack (Linux, Apache, mySQL, PHP)
* nginx - Lighter weight for higher performance, speed, and reduced memory footprint.  Another very popular option.
* python - {{Command{python -m ~SimpleHTTPServer [port]}}}
**  This is a very useful way to stand up fast and simple web servers anywhere.  It's handy for quick data exfiltration. 
* IIS - Microsoft's web server package for Windows

[[Netcraft Web Server Survey|https://news.netcraft.com/archives/2022/02/28/february-2022-web-server-survey.html]]
* Web server market share and stats over the last 10 years
* Apache used to be the most popular but has been steadily losing steam over the last few years with nginx gaining ground.


Default web site files:
* Apache {{File{htdocs}}} directory - ''h''yper''t''ext ''docs'' - The root of our web site.  These are the files our web server will provide
* {{File{index.html}}}, {{File{index.php}}} or whatever we define via the ~DirectoryIndex configuration option in httpd.conf
** This is the default page to provide if only a directory is given (eg, http://www.ncs205.net/)
* Or display directory listing if no file specified and the ~DirectoryIndex directive is enabled

Process ownership
* {{Command{ps aux | grep httpd}}} - Apache runs as an unprivileged user
* Any scripts executed will run as this user
* This protects the system from malicious or vulnerable scripts
** If a script is compromised, the attacker will only be able to access what that unprivileged user can access
** This kind of privilege separation and isolation are important security concepts to follow

Headers
* Extra information sent by the web server describing the connection and server data
* Header information provides useful troubleshooting and security metadata
* They're often hidden by your web browser, but you can see them in the developer tools or the command line with {{Command{curl}}}
** Use the {{Monospaced{ ''-v'' }}} {{Command{curl}}} option to see the headers ({{Monospaced{ ''-v'' }}} usually means verbose output for most commands)
{{{
[root@www ~]# curl -v -s http://localhost
* About to connect() to localhost port 80 (#0)
*   Trying ::1...
* Connected to localhost (::1) port 80 (#0)
> GET / HTTP/1.1
> User-Agent: curl/7.29.0
> Host: localhost
> Accept: */*
>
< HTTP/1.1 200 OK
< Date: Mon, 28 Mar 2022 02:57:50 GMT
< Server: Apache/2.4.6 (CentOS) PHP/5.4.16
< Last-Modified: Thu, 24 Mar 2022 03:46:42 GMT
< ETag: "56-5daeeb1c16b20"
< Accept-Ranges: bytes
< Content-Length: 86
< Content-Type: text/html; charset=UTF-8
<
<HTML>
<BODY>
<BR><BR><BR>
<center><B>Welcome to NCS205!</B></center>
</BODY>
</HTML>
* Connection #0 to host localhost left intact
}}}

~VirtualHosts
* A header value can be set containing the host name used to access the web server
* The server software will examine this header value to determine which site to display
* This allows for multiple web sites per server, depending on the host name used to access the server
** IP based virtual hosts - A single server and apache instance will be accessible by multiple IP addresses with each IP address linked to a different web site
** Name based virtual hosts - Multiple host names resolve to the same IP address.  Examine the hostname in the HTTP headers to determine which site to serve.


!!! Apache Modules
* Addons to Apache - Modules provide an extensible framework for additional functionality
** Static - Compiled in to Apache.  Apache must be recompiled to add support for new modules or to update them
** DSO - Dynamic Shared Objects - Compiled separately and loaded when apache starts
** apxs (Apache Extension tool) - A perl script to assist with building and installing apache DSO modules
** PHP was added as an Apache DSO module
* Check available modules
** Those compiled in: {{Command{apachectl -l}}}
** Available as DSO: {{Command{ls -l /etc/httpd/modules/}}}
* Add new modules with apxs (The manual way of doing things.  We'll use the automated packages for our labs)
** {{Command{apxs -c mod_foo.c}}}
** {{Command{apxs -ian foo mod_foo.la}}}
** Must then add a ~LoadModule directive to the apache config file
* Module examples:
** php - A robust web scripting language
** mod_rewrite - Provides a rule-based rewriting engine to rewrite requested URLs on the fly
** mod_security - Provides intrusion detection and prevention for web applications
** mod_limits - Limit the number of connections per host
** mod_headers - Customization of HTTP request and response headers
** Authentication modules - Different methods for authenticating web users
** https://httpd.apache.org/docs/current/mod/ - More available modules


!!! Apache configuration

!!!! Server level
* Main configuration file {{File{conf/httpd.conf}}}
** Configuration extras - {{File{conf.d/*.conf}}} & {{File{conf.modules.d/*.conf}}}
** A quick way to make common additional functionality available
** ie: SSL support, virtual hosts, user web sites

!!!! User level
{{File{.htaccess}}} files - Modify permitted configuration values per web directory


!!!! Module configuration
* A separate configuration file to tune php.
* By default {{File{ /etc/php.ini }}}

Set the following values in your php.ini file:
{{{
session.save_path = /tmp/php
log_errors = on
error_log = /var/log/httpd/php.log

date.timezone = "America/New_York"
disable_functions = system, exec, shell_exec
}}}

Be sure to create the directory {{File{/tmp/php}}} and make it owned by the apache user:
* {{Command{chown apache /tmp/php}}}


!!! Scripting
* A means to develop applications to generate dynamic web content 
* php - A standard server side scripting language for web development
** Change your {{File{ index.html }}} (located in {{File{ /opt/work/htdocs/}}}) to {{File{ index.php }}} and add the {{Monospaced{phpinfo();}}} php function to it
{{{
# cat /opt/work/htdocs/index.php
<HTML>
<BODY>
<CENTER>Welcome to NCS205!</CENTER>
<?php
// The line above instructs the php module to start processing php scripting
phpinfo();		// This function will display information and configuration for our php installation
// The line below instructs the php module to stop processing php scripting.
?>
</BODY>
</HTML>
}}}

Now {{Command{curl http://localhost/index.php}}} will execute the {{Monospaced{phpinfo()}}} function and return a dump of the server configuration.


* scripts typically run as the apache process (for modules) 

Keep web applications up to date!
* Security vulnerabilities are constantly discovered in web applications
* These vulnerabilities become attack vectors against the hosting server


!!! Extras

Basic HTTP is stateless: 
* Client makes a request, server responds, connection closed.
* cookies and session files can be used to maintain state between connections
** Cookies are files stored on your system to retain session information
** Authentication information may be stored in these cookies
** Leaking cookies is just as bad as leaking credentials

Content Distribution Networks (CDN)
* Globally or nationally distribute static content close to the end user
** Static content is cached to reduce load on the primary web server
** Serving content from a closer datacenter improves speed
* Examples:
** Cloudflare (Free/low cost tiers for experimenting and getting familiar with these concepts)
** Akamai


!!! Disable ~SELinux

~SELinux are ''s''ecurity ''e''xtensions designed to protect Linux systems. ~SELinux will prevent the ~LocalSettings.php file from being read. The website will say it cannot read the file even though the file permissions are correct. ~SELinux must be disabled if it is not already.
- Otherwise we need to properly configure it, which is out of scope for this class.

To disable ~SELinux (it may already be done):
* Disable ~SELinux now:  {{Command{setenforce 0}}}
* To disable ~SELinux on boot, edit the file {{File{/etc/selinux/config}}} and change ''enforcing'' to ''disabled'' on the line that is not commented out.
* Check the status of ~SELinux with the command:  {{Command{getenforce}}}.  It should return the output ''disabled''.

!! Install and configure ~MediaWiki

!!! 1. Install the ~MediaWiki web software package

Download the package from the [[MediaWiki web site|http://www.mediawiki.org/wiki/MediaWiki]] with {{Command{wget}}}
* Look for the downloads section and find the latest version in .tar.gz format.  
* Current file name is {{File{mediawiki-1.41.1.tar.gz}}} as of writing this lab.  The latest version and file name may have incremented since then.
Save it to the directory {{File{/opt/work/htdocs/}}}
Extract the tarball with a command resembling:  {{Command{tar -xf mediawiki-1.41.1.tar.gz}}}
Rename the base directory from ''mediawiki-//version//'' to ''wiki''


!!! 2. Install the ~MariaDB database package

~MediaWiki will need a database to install its dynamic content.  We'll now need to install the ~MariaDB database package and php extension for connecting to the database.

!!!! Set up the ~MariaDB package repo
The version of ~MariaDB in the default ~CentOS package repository is now too old for ~MediaWiki.  We'll instead add a custom package repository to our system and install a newer version.
# Change to {{File{/tmp/}}} and use it for downloading this temporary file: {{Command{cd /tmp}}}
# Download the setup script for the ~MariaDB package repository:  {{Command{wget https://r.mariadb.com/downloads/mariadb_repo_setup}}}
# Run it:  {{Command{bash mariadb_repo_setup}}}
!!!! Install and start the ~MariaDB database packages
# Install the database packages: {{Command{yum install mariadb-server mariadb}}}
# Set the {{Monospaced{mariadb}}} service to start on boot
# Start the {{Monospaced{mariadb}}} service now

!!!!  ''a.'' Verify the services are running
Check the status of the {{Monospaced{mariadb}}} service with the command {{Command{systemctl status mariadb}}}.  The service ''must'' be enabled and active before proceeding.

[img[img/db-status.png]]


!!!!  b. Create wiki user

Now that the database service is installed, we need to create a user and database for the wiki.  

The {{Command{mariadb}}} command can be used to connect to the database service as root, the database superuser.  There is currently no root password set.  Just press enter if prompted for one.  Once you are connected to the database, enter the remaining commands at the database prompt.  You may supply whatever password you'd like for //wiki_pass//, just be sure to remember what you use.

Execute the following commands:
{{Commands{
[root@www ~]# ''mariadb -u root''

Welcome to the ~MariaDB monitor.  Commands end with ; or \g.
Your ~MariaDB connection id is 5
Server version: 11.3.2-~MariaDB ~MariaDB Server

Copyright (c) 2000, 2018, Oracle, ~MariaDB Corporation Ab and others.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

~MariaDB [(none)]> ''CREATE USER 'wiki'@'localhost' IDENTIFIED BY 'wiki_pass';''
Query OK, 0 rows affected (0.00 sec)

~MariaDB [(none)]> ''CREATE database wiki;''
Query OK, 1 row affected (0.00 sec)

~MariaDB [(none)]> ''GRANT select, insert, update, delete, create, alter, index ON wiki.* TO 'wiki'@'localhost';''
Query OK, 0 rows affected (0.00 sec)

~MariaDB [(none)]> ''flush privileges;''
Query OK, 0 rows affected (0.00 sec)

~MariaDB [(none)]> ''exit''
Bye
}}}

{{Note{''Note:'' If you goof the wiki user's password, it can be reset by logging into the database as the root user and running:  {{Monospaced{''SET PASSWORD FOR 'wiki'@'localhost' = PASSWORD('new_password');''}}}.  Replace //new_password// with whatever you want the password to be.  Then run the {{Monospaced{''flush privileges''}}} database command.}}}


Now test your connection to the database by logging into it with the new wiki user:

{{Commands{
[root@www ~]# ''mariadb -u wiki -p wiki''
Enter password:

Welcome to the ~MariaDB monitor.  Commands end with ; or \g.
Your ~MariaDB connection id is 5
Server version: 11.3.2-~MariaDB ~MariaDB Server

Copyright (c) 2000, 2018, Oracle, ~MariaDB Corporation Ab and others.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

~MariaDB [wiki]> ''exit''
Bye
[root@www ~]#
}}}

!!! 3. Quick Verification: 

Our server software is installed; let's use {{Command{curl}}} to verify everything is working so far:

{{{
[root@www htdocs]#  curl http://localhost/wiki/
<!DOCTYPE html>
<html lang="en" dir="ltr">
        <head>
                <meta charset="UTF-8" />
                <title>MediaWiki 1.41.1</title>
                <style media="screen">
                        body {
                                color: #000;
                                background-color: #fff;
                                font-family: sans-serif;
                                padding: 2em;
                                text-align: center;
                        }
                        p, img, h1, h2, ul {
                                text-align: left;
                                margin: 0.5em 0 1em;
                        }
                        h1 {
                                font-size: 120%;
                        }
                        h2 {
                                font-size: 110%;
                        }
                </style>
        </head>
        <body>
                <img src="/wiki/resources/assets/mediawiki.png" alt="The MediaWiki logo" />
                <h1>MediaWiki 1.41.1 internal error</h1>
                <p>
                        MediaWiki 1.41.1 requires PHP 7.4.3 or higher; you are using PHP 5.4.16.
                </p>
                <h2>Supported PHP versions</h2>
                                <p>
                        Please consider <a href="https://www.php.net/downloads.php">upgrading your copy of PHP</a>.
                        PHP versions less than v8.1.0 are no longer supported by the PHP Group and will not receive
                        security or bugfix updates.
                </p>
                <p>
                        If for some reason you are unable to upgrade your PHP version, you will need to
                        <a href="https://www.mediawiki.org/wiki/Download">download</a> an older version of
                        MediaWiki from our website. See our
                        <a href="https://www.mediawiki.org/wiki/Compatibility#PHP">compatibility page</a>
                        for details of which versions are compatible with prior versions of PHP.
                </p>
        </body>
</html>
}}}


If you look closely at the error above, you'll see that ~MediaWiki isn't happy about our version of php.  The downside of Enterprise versions of operating systems and software is they tend to favor old and stable versions instead of the latest versions of software packages.  Here, ~MediaWiki is complaining that the default version offered by the standard yum repository is too old.  We're going to need to update php and deviate from the standard software repositories to do so.

!!!  4. Run these commands to update to a newer version of php:

* Install the EPEL repository:  {{Command{yum install epel-release}}}
* Install the [[Remi Release|https://rpms.remirepo.net/]] repository for ~CentOS 7: {{Command{yum install http://rpms.remirepo.net/enterprise/remi-release-7.rpm}}}
* Install the yum-utils package: {{Command{yum install yum-utils}}}
* Enable the php 8.1 yum repository from Remi: {{Command{yum-config-manager &#045;-enable remi-php81}}}
* Install the new php:  {{Command{yum install php}}}
* Verify the version of php:  {{Command{php -v}}}
** It should now be (roughly) version 8.1.27
* Also install the following packages.  These will be required by ~MediaWiki:  {{Monospaced{''php-mbstring php-xml php81-php-gd php-intl php-mysql''}}}
** A lot of dependencies will be required.  Install them too.
* Restart apache to activate the new version of php and its new extensions:  {{Command{systemctl restart httpd}}}

Now when you run our connection test, you shouldn't get any version errors or warnings about missing modules:

{{{
[root@www ]# curl http://localhost/wiki/
<!DOCTYPE html>
<html lang="en" dir="ltr">
        <head>
                <meta charset="UTF-8" />
                <title>MediaWiki 1.41.1</title>
                <style media="screen">
                        body {
                                color: #000;
                                background-color: #fff;
                                font-family: sans-serif;
                                text-align: center;
                        }

                        h1 {
                                font-size: 150%;
                        }
                </style>
        </head>
        <body>
                <img src="/wiki/resources/assets/mediawiki.png" alt="The MediaWiki logo" />

                <h1>MediaWiki 1.41.1</h1>
                <div class="errorbox">
                        <p>LocalSettings.php not found.</p>
                                <p>Please <a href="/wiki/mw-config/index.php">set up the wiki</a> first.</p>
                </div>
        </body>
</html>
}}}

We should be ready to configure the wiki software.



!! Bypassing network restrictions with proxies

A proxy is a middle man, passing on network requests to their destination on your behalf.  Our web server ~VMs are behind the lab infrastructure's router and cannot be accessed outside of that LAN.  We'll need to use a proxy in order to view the wiki sites in our browsers at home.

See the [[Tunnels & Proxies with SSH]] page for more information on how to set up a SOCKS proxy with SSH to access protected resources.


!!! 5. Configure your ~MediaWiki

With a SSH proxy in place, you should be able to complete the configuration of your wiki.  After establishing the tunnel, browse to http://your_www_ip_address/wiki/ to reach the configuration page.  It will look something like this.

[img[img/MediaWiki.png]]


As you are stepping through the configuration page, be sure to use these values:
* database host:  {{Monospaced{'' localhost ''}}}
* database name: {{Monospaced{'' wiki ''}}}
* database username: {{Monospaced{'' wiki ''}}}
* database password: ''// whatever password you used above //''

Once the Wiki setup is complete, you will be prompted to download the {{File{~LocalSettings.php}}} file to your home computer.  This file must then be uploaded to the {{File{/opt/work/htdocs/wiki/}}} directory on your web server.  You will be able to fully access your wiki after this file is uploaded.  
 - Our class ~VMs are on [[RFC 1918|https://datatracker.ietf.org/doc/html/rfc1918]] IP addresses. You cannot connect directly to your web server VM from home to upload the {{File{~LocalSettings.php}}} file.  It must be first uploaded to the class shell server.
 - The [[Virtual Machines]] page (linked on the top menu bar) has a diagram of our lab infrastructure which may be helpful.

The {{Command{scp}}} or {{Command{sftp}}} tools may be helpful for transferring files on the command line between ~VMs.


! Assignment

!! Web Server:
<<tiddler [[Lab 54 - Set up MediaWiki]]>>
! Material
!! Watch
NTP Tutorial: https://www.youtube.com/watch?v=eh5ZL_fNi0g

!! Read
* Linux Administration - The Logging Daemon / rsyslog / journald
** 7th Ed: Pages 230-240 in Chapter 8
** 8th Ed: Pages 187-197 in Chapter 9


! Time & Logging

Time and logging go together.  It's incredibly useful to know //when// something happened if you need to investigate a problem or security incident, especially if you need to correlate events among systems to build an accurate timeline.  If time is not properly synchronized among your systems, it's difficult to properly understand sequences of events.  You may also run into functional issues if time is wildly incorrect.  For example, SSL certificates may be considered invalid if time is wrong on a system.

!! Time

What's the [[difference between accuracy and precision|https://www.thoughtco.com/difference-between-accuracy-and-precision-609328]]?

Importance of accurate time:
* file timestamps - know when something was modified
* tracing events - know when a breach occurred, when a change was made, or when someone logged in to a system
* security
** certificate validity - Certificates are only valid for a certain time range.  If a system's time is off, it may negatively impact secure communication

Importance of precise time:
* correlating activities between systems

Ideally we'll have both - accuracy and precision.  We want the correct time on all systems.  But the closer time is among systems in a network, the easier it will be to correlate events between them.


!!! Setting the system date
* {{Command{tzselect}}} - Select the time zone.  This is typically done for you on most modern installs
* The symbolic link {{File{/etc/localtime}}} will point to the timezone file to use
** Timezone definition files are typically stored within {{File{/usr/share/zoneinfo/}}}
* {{Command{date ~MMDDhhmm&#91;[CC]YY]}}} - set the system date and time manually
** It's usually not necessary to set the date/time and timezone in a VM.  ~VMs should obtain their time from the host.

!!! Network Time Protocol (NTP)
* NTP provides an automated way to keep time in sync and counter clock drift.
* A local server is configured to query a pool of many time servers and the best candidates will be used to keep the clock in sync
* They can maintain time to the millisecond 
* Clock strata - Distance from the reference clock
** Stratum 0 - The reference clock.  High precision, high accuracy clocks, such as atomic, GPS, or radio.
** Stratum 1 - Primary time servers.  Systems directly attached to and synchronized with stratum 0 clocks
** Stratum 2 - Secondary time servers.  Systems synchronized to stratum 1 time servers over the network.
** Stratum n+1 up to 15 - Time servers synchronized to a lower stratum clock
** Stratum 16 - An unsynchronized clock.

!!!! NTP Commands:
* {{Command{ntpdate}}} - Client utility.  A one-time immediate clock update.  Requires a time server to use as an argument.
** eg: {{Command{ntpdate 0.pool.ntp.org}}}
** Can be enabled to run on boot to force a time update on system startup.
*** The {{Command{ntpd}}} service may not close a large time skew.  Running {{Command{ntpdate}}} will update the time so {{Command{ntpd}}} can keep it up to date.
* {{Command{ntpd}}} - Background service to maintain time synchronization
** Sets and maintains system time in sync with a central point
** Regularly polls one or more time servers for updates
** The ntpd service updates time slowly in small steps
** May use an internet-based source time server or a local one.
*** Generally, a large site will maintain an ntp server locally that other systems on the local network will synchronize against
*** Using a local service increases security and reduces strain on the public NTP servers
** An NTP service may be configured to provide time synchronization to client systems
** {{Command{ntpstat}}} - show the status of the current ntp service
** {{Command{ntpq}}} - query an ntp server
*** ntpq sub commands:
**** peers
**** associations
*** Tally codes for peers:
**** blank - Unreachable and discarded
**** - - Considered an outlier and discarded
**** + - Providing data and a candidate for use
**** * - The system peer and providing data

** {{Command{ntpdc}}} - control an ntp server

!!!!! {{Command{ntpq peers}}} command example for a fully synchronized NTP client:
[img[img/ntpq-peers.jpg]]
Left to right:
* Red - Tally code.  Here it is indicating an accepted NTP peer which is providing data
* Green - Remote NTP server.  Who we are obtaining our time from.
* Yellow - the time reference our source is using
* Blue - Stratum level of our time source 
* Orange - Connection type.  U means unicast
* Purple - Connection statistics 



{{Warning{''Warning'':  NTP is a very basic protocol that uses UDP port 123 for its communication.  The ntpd service will bind to that port and any client tools will try to communicate over the same port.  If the ntpd service is running and bound to the port to listen for connections, the port is then not available for the {{Command{ntpdate}}} client tool to use.  If you must run {{Command{ntpdate}}}, stop the ntpd service to free up the socket and then start it back up again after running {{Command{ntpdate}}}.  Successful time synchronization will return output similar to:
> {{Monospaced{2 Oct 18:48:39 ntpdate[14371]: adjust time server 192.168.0.1 offset 0.000057 sec}}}
}}}


UDP Reflection attacks against NTP
* Reflection attacks are a big problem.  DNS and NTP were popular targets
** A tiny network request by an attacker can be "reflected" to its intended target.  
** Generally the amount of network traffic generated by the reflection toward the target is significantly larger than the request
** A ~Denial-of-Service attack is launched requiring only minimal resources of the attacker
* http://blog.cloudflare.com/technical-details-behind-a-400gbps-ntp-amplification-ddos-attack/
* https://ics-cert.us-cert.gov/advisories/ICSA-14-051-04
* monlist command
** This is what was abused in the last NTP reflection attack.  A small NTP request would return a very large response.  
** The request IP address would be spoofed so the response is sent to the victim
* ntpdc -n -c monlist core.merantn.ncs205.net
* To protect against this attack: 
** disable monitor
** add the noquery restriction to the configuration


!! Logging

Unix logging is an excellent resource and can quickly solve a lot of problems for you.

Here's a great example.  I have a typo in my Apache configuration file and the service will not restart.  The log entry details exactly what the problem is and where the problem originates:

{{{
[root@www conf]# systemctl restart httpd
Job for httpd.service failed because the control process exited with error code. See "systemctl status httpd.service" and "journalctl -xe" for details.

[root@www conf]# systemctl status httpd
 httpd.service - The Apache HTTP Server
   Loaded: loaded (/usr/lib/systemd/system/httpd.service; enabled; vendor preset: disabled)
   Active: failed (Result: exit-code) since Wed 2020-04-08 23:50:48 EDT; 4s ago
     Docs: man:httpd(8)
           man:apachectl(8)
  Process: 2170 ExecStop=/bin/kill -WINCH ${MAINPID} (code=exited, status=1/FAILURE)
  Process: 2168 ExecStart=/usr/sbin/httpd $OPTIONS -DFOREGROUND (code=exited, status=1/FAILURE)
 Main PID: 2168 (code=exited, status=1/FAILURE)

Apr 08 23:50:48 www systemd[1]: Starting The Apache HTTP Server...
Apr 08 23:50:48 www httpd[2168]: AH00526: Syntax error on line 1 of /etc/httpd/conf/httpd.conf:
Apr 08 23:50:48 www httpd[2168]: Invalid command 'my', perhaps misspelled or defined by a module not included in the server configuration
Apr 08 23:50:48 www systemd[1]: httpd.service: main process exited, code=exited, status=1/FAILURE
Apr 08 23:50:48 www kill[2170]: kill: cannot find process ""
Apr 08 23:50:48 www systemd[1]: httpd.service: control process exited, code=exited status=1
Apr 08 23:50:48 www systemd[1]: Failed to start The Apache HTTP Server.
Apr 08 23:50:48 www systemd[1]: Unit httpd.service entered failed state.
Apr 08 23:50:48 www systemd[1]: httpd.service failed.
}}}

Notice the lines above:
''Apr 08 23:50:48 www httpd[2168]: ~AH00526: Syntax error on line 1 of /etc/httpd/conf/httpd.conf:''
''Apr 08 23:50:48 www httpd[2168]: Invalid command 'my', perhaps misspelled or defined by a module not included in the server configuration''

If I examine line 1 of my configuration file as the log suggests, I'll spot my problem - text that doesn't conform to the required syntax of the file.

{{{
[root@www conf]# head -5 httpd.conf
my typo
# This is the main Apache HTTP server configuration file.  It contains the
# configuration directives that give the server its instructions.
# See <URL:http://httpd.apache.org/docs/2.4/> for detailed information.
# In particular, see
}}}

Syslog:
* The syslog service is the primary recipient of system-level event log information
** syslog then determines what should be done with that log data based on configuration
*** save it locally, send it to another system for log aggregation, or discard it
** Allows for centralized log collection and management
* Some utilities/services log directly to their own files and some use syslog
** Apache is an example of a service that saves log data to its own files
* syslog events are written to the domain socket /dev/log 
** sockets provide inter-process communication via the filesystem
** Processes either communicate via open network ports or these socket files
* log events contain the timestamp, type, severity, and details
* Most log files are plain text, allowing review or parsing with standard unix CLI tools, such as the filters we've been working with

* syslog events consist of pre-defined facility and severity levels
** facility is generally the service that generated the message (auth, cron, ftp, mail) and based on standardized names
*** local0-7 facilities are for customized destinations
*** or the keyword none to disable a particular facility or severity
** severity ranges from emergency to debug
*** When specified, that severity level and greater will be processed
** See the /var/log/messages example in /etc/rsyslog.conf
** Here's a list:

[img[img/syslogFacility.jpg]]
[img[img/syslogSeverity.jpg]]


Most services can elevate verbosity for debugging, recording additional information to assist with troubleshooting.
 - This should only be enabled for a short time.  The extra log entries may consume a lot of space over a long period of time.

!!! syslog components:
* syslogd - the logging service which receives and processes the log information
** {{File{/etc/rsyslog.conf}}} - The main configuration file
** {{File{/etc/rsyslog.d/}}} - The secondary configuration files
* library routines to submit log messages to syslogd
* {{Command{logger}}} - userland utility for recording log events from the shell.  Handy for scripting.
** Monitor or debug your automated scripts
** Backups and account processing are good examples
** {{Command{logger -t $0 -p local5.warning "test message"}}} - Send a test message to syslog from within a shell script with the local5 facility and warning severity
* logrotate / newsyslog - rotate logs at a configured time or file size
** It's important to rotate logs instead of letting them accumulate indefinitely.  Eventually they will consume the filesystem and will likely cause system failure.
** It's wise to account for this when designing a system and put logs on a separate filesystem.
** Retention issues - How long do we keep logs for?
** compress or delete old logs according to an archival schedule
** Logrotate - a tool which periodically runs to rotate log files
*** {{File{/etc/logrotate.conf}}} - Main configuration file
*** {{File{/etc/logrotate.d/}}} - Secondary configuration files
*** Periodically executed by cron to process the log files
**** Take a look at the file {{File{/etc/cron.daily/logrotate}}}

Standard exemptions to syslog:
* {{File{wtmp}}} - binary file, view with last command
* {{File{lastlog}}} - view with lastlog command
* {{Command{psacct}}} - process accounting service.  View with lastcomm command
** Not built-in.  Will need to install {{Monospaced{psacct}}} package and enable the {{Monospaced{psacct}}} service
* Some services do not send to syslog and instead manage log files themselves:
** Apache - Logs are in {{File{/var/log/httpd}}}
** BIND DNS server - Logs may be in {{File{/var/log/named/}}}

syslog as a network service
* syslog is by default a local service and not bound to a network port
* But can be configured to collect log events from multiple hosts
* Many benefits to central logging:
** Aggregate logs in one place for central review and retention
** If an attacker breaks into a system, they cannot easily remove the logs to cover their tracks if the logs are also stored on another server

!!! Kernel boot logging and message buffer
* We need a way to record the kernel events prior to init, before syslog starts and the filesystems are mounted
* The kernel stores this boot information in an internal buffer
** Also contains the system hardware detected on boot and any subsequent hardware changes.
* Captured by the system and recorded once it fully loads
* Viewable with the {{Command{dmesg}}} command
* Also saved to {{File{/var/log/dmesg}}}


!!! Systemd Additions:

!!!! Manage time:
* {{Command{ timedatectl list-timezones }}}
* {{Command{ timedatectl set-timezone //zone// }}}
* {{Command{ timedatectl status }}}

!!!! Display Logs:

{{Command{ journald }}} - New logging daemon with systemd
* Can replace or augment syslog

{{Command{ journalctl }}} - Front end for displaying logs
* Logs since last reboot: {{Command{ journalctl -b }}}
* Show last 10 lines (replace tail): {{Command{  journalctl -n }}} or {{Command{ journalctl -n 20 }}}
* Display new log entries as they arrive (replace tail -f):  {{Command{ journalctl -f }}}
* Display kernel messages:  {{Command{ journalctl -k}}}
* Display log entries for a particular unit:  {{Command{ journalctl -u httpd.service }}}
** For a particular time range:  {{Command{ journalctl -u httpd.service &dash;&dash;since yesterday }}}
** {{Command{ journalctl -u httpd.service journalctl &dash;&dash;since "2015-10-01" &dash;&dash;until "2015-10-31 03:00" }}}


!!! We have all this log data, now what?

* Logcheck - A utility to mine the log data and send reports
* fail2ban - Scan log files and ban malicious ~IPs
** Perform a regular expression match and add offending ~IPs to a firewall
** Important way to combat all of the automated scanning on the internet.  Our class shell server is under constant attack and countermeasures like this should be deployed.

The class shell server is currently blocking 293 IP addresses which attacked us sometime in the last 9 hours:

{{{
[root@shell ~]# fail2ban-client status sshd-root
Status for the jail: sshd-root
|- Filter
|  |- Currently failed: 0
|  |- Total failed:     1514
|  `- Journal matches:  _SYSTEMD_UNIT=sshd.service + _COMM=sshd
`- Actions
   |- Currently banned: 293
   |- Total banned:     1441
}}}


* Web log data
** http://en.wikipedia.org/wiki/List_of_web_analytics_software
** [[GoAccess|http://goaccess.io/]]
** http://www.awstats.org/
** [[Logstalgia|https://code.google.com/p/logstalgia/]] 

Big data analytics: 
* ELK stack - Free & open source (FOSS)
** Elasticsearch - log searching and data analytics
** Logstash - centralized logging and parsing
** Kibana - data visualization
* Enterprise SIEM (Security information and event management) tools
** Splunk - Big data analytics with a nice web front-end
*** //Splunk captures, indexes and correlates real-time data in a searchable repository from which it can generate graphs, reports, alerts, dashboards and visualizations//


! Assignment

<<tiddler [[Lab 55 - Bring core VM online]]>>
----
<<tiddler [[Lab 56 - Time]]>>
----
<<tiddler [[Lab 57 - Logging]]>>
----
<<tiddler [[Lab 58 - Working with logs]]>>
! Material
!! Read: 
- Linux Administration Chapter 17 - DNS. 
- Read the notes in the Material section below when completing the lab assignment.
!! Watch:
* DNS Intro: https://www.youtube.com/watch?v=mpQZVYPuDGU


! Useful additional reading:

*[[DNS for Rocket Scientists|http://www.zytrax.com/books/dns/]]
*Open Resolvers for network amplification attacks (~DDoS)

 - kaminsky DNS bug
 - DNSSEC
 - get familiar with the dig command
 - http://www.mayhemiclabs.com/tools/malwarednsscraper
 - http://www.iana.org/assignments/dns-parameters/dns-parameters.xhtml
 - EXPOSURE: Finding Malicious Domains Using Passive DNS Analysis - http://www.syssec-project.eu/m/page-media/3/bilge-ndss11.pdf


http://en.wikipedia.org/wiki/Forward_Confirmed_reverse_DNS
http://www.ietf.org/rfc/rfc1912.txt
http://www.netwidget.net/books/apress/dns/info/ttl.html


! DNS Notes

Unfortunately we don't have enough time to cover DNS in depth.  What's here is only the highlights.  DNS is a core service that is critical for the proper operation of the internet.  Everyone should be familiar with its components and how it works.

!! Host names and some history

Numbers are easier for machines to work with.  But names are easier for people to work with.  As the number of systems on the early networks grew, it became more difficult to keep track of their addresses.  The file  {{File{/etc/hosts}}} was used to set up a manual mapping between IP addresses and hosts before the advent of DNS.  The users could now refer to systems by name and the  {{File{/etc/hosts}}} file would convert that name to an IP address.  Users/administrators would have to maintain their {{File{hosts}}} file themselves, and some would share these files with others.

As the networks kept growing, maintaining these static files and their frequent changes became more difficult.  They would often fall out of date as systems were added, removed, and addresses were changed.  A centrally accessible yet locally maintained system needed to be devised to handle this conversion of host names to IP addresses.  This system became the Domain Name System (DNS).

The {{File{/etc/hosts}}} file still exists on all unix systems.  We worked with it during our last labs since we did not yet have DNS in place.  Windows systems also have a hosts file buried within their system32 directory.  By default, hosts defined in the hosts file will override information that comes from DNS.  This is an important thing to keep in mind.  If someone tampers with your hosts file, they could easily redirect your communication elsewhere.  Check the output of the {{Command{w}}} command on the class shell server.  Where am I connecting from?  Now examine the {{File{/etc/hosts}}} file.

On Unix systems, the file {{File{/etc/nsswitch.conf}}} sets the query order for the databases on the system.  By default, the local files (eg, {{File{/etc/hosts}}}) are searched first and any matches in the local files will override DNS.  This mechanism can also be used to obtain user and group information from other sources.

!! DNS - Domain Name System


A distributed and hierarchical database
* must be robust.  Function of the internet relies on this
* must be distributed
** different groups are responsible for different segments of the data
** if one node goes down the rest of the global data network is not impacted
* must be efficient
** LOTS of requests per second
** caching
*** cached responses saved on local DNS servers for the TTL time (time to live)
*** change to a low TTL value before major updates so records aren't cached very long
*** otherwise, keep a high TTL value to maximize caching and optimize performance

!! Delegation

An example of a proper, fully qualified domain name:  www.ncs205.net''.''  (notice the dot at the end)
The dot at the end is often omitted, but it signifies the root servers.  The delegation of a host name begins on the right end and traverses towards the left.

* root servers  (.)
** These are the main DNS servers for the internet.  There are 13 root servers, a.root-servers.net. through m.root-servers.net.
* Top level domain (TLD):  ccTLD, gTLD, sTLD
** ccTLD:  Country code TLD.  eg:  us, de, cn
** gTLD: Generic TLD.  General purpose.  eg: net, org, com
*** New gTLDs:  Expanding further beyond the original net, com, org:  http://newgtlds.icann.org/en/program-status/delegated-strings
** sTLD: Sponsored TLD.  Has a sponsor representing a specific group.  eg:  .xxx, .travel, .jobs
** Policy set by the Internet Corporation for Assigned Names and Numbers ([[ICANN|https://www.icann.org/]])
** Lists maintained at the Internet Assigned Numbers Authority ([[IANA|http://www.iana.org/domains/root/db]])
* Domain ownership maintained by central registry operators.  These are the companies that are responsible for managing domains within their TLD.
** com & net: ~VeriSign
** org: Public Interest Registry
** edu: Educause
** us:  Neustar
** info: Afilias
** arpa: IANA
* Domain sale contracted out to commercial registrars
** Various registrars sell second-level domains.  eg:  Google, Godaddy, NameCheap, CloudNS, name.com, etc.
** The Authoritative name servers for a domain are stored with the TLD's central registry
** The {{Command{whois}}} command will display registration information for a domain:  {{Command{whois sunypoly.edu}}}
* sub-domains
** each domain can delegate authority for sub domains to other servers
** Allows a domain owner to create their own sub-domains
*** cs.sunypoly.edu : edu delegates sunypoly to ITS, ITS delegates cs to the ~DogNET
*** ncs205.net : net delegates ncs205 to me, I'll delegate //username//.ncs205.net to you
* Glue Records
** An IP address of a domain's name server held at the domain's registry
** Prevents circular dependencies
** Check whois information for a domain to see its registered name servers.
** The {{Monospaced{A}}} record results for name servers must be statically published upstream
*** The {{Monospaced{A}}} record is a type of DNS record.

Glue records are important, otherwise we have a chicken and egg problem.  For example, .net needs to publish the glue records for ncs205.net so anyone looking for its records knows which server to contact.

When I registered the domain ncs205.net, I needed to include the names of the domain's DNS servers.  This is so anyone searching for records belonging to ncs205.net will know where to look for them.  The names of the DNS servers for ncs205.net are ns1.ncs205.net, ns2.ncs205.net, and ns3.ncs205.net.  But how can I look up the IP address for those three hosts if I don't know the IP address of the ncs205.net DNS server?  To get around this problem, I need to publish glue records to the .net registry.  These are hard-coded DNS records so the .net DNS servers can provide the IP addresses for those three servers.

[img[img/dns.png]]


!! Host name representation

Fully Qualified domain name (FQDN)
* Similar to an absolute path in the filesystem
* Instead it's read right to left
* First is root, the .
* Next the tld (com, edu, org, etc)
* Next the second-level domain
* Sub domains or host names would follow.
* Example:  fang.cs.sunyit.edu.  
* Client tools will append the trailing ''.'' if you leave it out, but its use in your DNS zone file configuration matters.

<<<
Use the {{Command{whois}}} tool to obtain domain registration and ownership information.
{{Command{yum provides '*/whois'}}} to see which package contains that command
<<<


!! Zone Apex 
* Also known as a naked domain
* What will your domain itself resolve to?  
** It will typically resolve to the same IP address as www.

{{{
@	IN	A	192.168.12.25
@	IN	MX 10	mail
}}}
{{Note{''Note:'' The @ is a shortcut which represents the name of the zone.}}}


!! Authoritative name server
* Contains the data for a particular zone
** A DNS zone is a collection of records for a particular domain or sub-domain.
* It is derived from chaining of NS records starting with the root servers down to the published name servers for a domain
** The NS records identify the authoritative DNS servers for a zone and delegate subdomains
* There is typically a master DNS server and one or more slaves that sync from it
** RFC best practices require at least 2 authoritative name servers per domain
* Authoritative name servers are guaranteed to be up to date.  The data is coming from the source and is not cached

The {{Command{dig}}} command is a powerful tool for querying DNS records.

Trace the delegation from the root servers down to the ~DogNET DNS servers:  {{Command{dig +trace fang.cs.sunyit.edu}}}
Trace google's delegation:  {{Command{dig +trace www.google.com}}}
Query a ~DogNET authoritative DNS server for a record:  {{Command{dig fang.cs.sunyit.edu @ns7.suny.edu}}}
 - notice the {{Monospaced{''aa''}}} flag in the flags section at the top.  This means it's an authoritative response.
 - Can you spot the misconfiguration in the Additional Section at the bottom?

!! Zone transfer
* Keeps authoritative DNS servers in sync with each other.
* We want to have multiple servers per zone, ideally distributed across networks.
** DNS is a vital resource.  We don't want single points of failure.
* Zone transfers are triggered automatically when the master is updated or based on a timeframe in the SOA record
* Typically restricted to systems authorized by an ACL (access control list)
** We don't want anyone to be able to pull down all of our records.  Only permit trusted name servers to do this.
* Our pyramid topology
** ns0 setup - A top server which doesn't allow queries from end users distributes zones to lower servers that handle client requests
** especially useful for protecting records and security keys
** and ensuring availability and integrity of the data
** ns0 is the master and kept secure.  When updates are published, it will signal the zones to connect and pull the data

!! Non-authoritative
* Caching name servers.  These are the general-purpose name servers available, such as those offered by your ISP, the Google 8.8.8.8 servers, or the ~CloudFlare 1.1.1.1 DNS servers.
** They cache records for the TTL value to improve performance
** {{Command{dig www.google.com}}}
** clear the cache (if you run the name server) - {{Command{rndc dumpdb}}}
*** or {{Command{ipconfig /flushdns}}} on Windows
** cache snooping
*** analyze the cache to see if malware is on your network (or to snoop on what your users are accessing)
* data retained in the cache might be a little stale

From off campus, such as our shell server:
{{Command{dig fang.cs.sunyit.edu}}}
 - We're now querying the local name server instead of the campus server
 - notice lack of aa flag


!! Recursive vs non-recursive 
* non-recursive : I don't have that data, but you can look over there
** required for core nameservers to offload lookups to other servers (requires less resources)

* recursive : I don't have that data, but let me find it for you
** Performs queries on behalf of clients on the network  (Your ISP may have one)
** pro - data is stored in the cache for faster future lookups
** con - ~DDoS amplification attacks
*** These kinds of attacks are a big deal:
<<<
     Up to 70:1 attack ratio (60 bytes for the request, 4000 for the response)
     spamhaus attack - 100gbps
     demo : dig isc.org any
                dig ripe.net +edns=0 +bufsize=4096 any
     DNS traffic is UDP, fire and forget, easy to spoof
     Like open mail relays - have to close down open recursive servers now that they're being actively exploited
     dig +short amiopen.openresolvers.org txt 
<<<
** http://www.theregister.co.uk/2013/06/03/dns_reflection_ddos_amplification_hacker_method/
** http://blog.cloudflare.com/the-ddos-that-knocked-spamhaus-offline-and-ho
** mix - allow recursion for trusted local hosts, but not for remote ones

From the class shell server:
{{Command{dig www.google.com @ns1.cs.sunyit.edu}}} - You (hopefully) will get a //recursion requested but not available// error.
{{Command{dig www.google.com @ns1.verizon.com}}} - Same as above.
{{Command{dig www.verizon.com @ns1.verizon.com}}} - This should return a record because the name server we're querying is authoritative for  it.


!! Hints file
* Store and serve IP addresses for root servers on the local DNS server
** Another chicken and egg problem.  A local DNS server needs to know where the root servers are in order to know where to start looking for data.  This hard-coded mapping between the root server names and their IP addresses is called the hints file. It works similar to the {{File{/etc/hosts}}} file.  You can find a copy of it in {{File{/var/named/named.ca}}} after you install your DNS server software.

!! Querying DNS

Tools for searching DNS:
* {{Command{nslookup}}} - A standard utility available in windows and unix.  It's in the bind-utils package on ~CentOS
* {{Command{host}}} - A barebones DNS query utility
* {{Command{dig [record_type] record [@server]}}} - A very powerful DNS query utility.  Also available in the bind-utils package
** {{Command{dig -x //ip//}}} for reverse DNS queries
* {{Command{drill}}} - A popular alternative to dig.

!!! Using dig
* See available options with {{Command{dig -h}}}
* Don't forget the -x for reverse queries
* perform zone transfer to test data
** From either the class shell server (the slave) or your core VM (the master):  {{Command{dig merantn.ncs205.net @localhost axfr}}}
** axfr is the record type for a zone transfer
** It's always good to compare results between your master server and the slave to ensure they match
* see TTL and cache time remaining
* dig errors
** NXDOMAIN - No records for any type
** NODATA - No records for the requested type  (displayed as NOERROR with no answer section)
** REFUSED - The server is refusing our connection likely due to recursion being requested.


!! Creating Zones

DNS records are grouped into zones on the actual authoritative name servers
Each zone contains resource records of various record types
* lines in the zone files are our records
* There are two types of zones, forward and reverse
** forward: convert host name to IP address
** reverse: convert IP address to host name


!! Resource Records

Resource records are the individual records within a zone and there are many different types.

[[RFC 1033|https://www.ietf.org/rfc/rfc1033.txt]] - Domain Administrators Operations Guide

Format: 
''<name>   [<ttl>]   <class>   <type>   <data>''

Example:
www		5m	IN	A	10.1.2.3

www = the host name to map to the IP address
5m = Change the TTL of this record to 5 minutes.  This field is optional.
IN = Internet
A = The type of record we're creating
10.1.2.3 = The IP address we're mapping the hostname to


!!! SOA - Start of Authority

Each zone must have a SOA record at the top containing the:
* serial number for the zone.  This number must increment every time a change is made, otherwise changes will not propagate
** It is an unsigned 32 bit value and must be between 1 and 4294967295.
** Counting the digits is a fast way to validate.  You'll likely have an error if the serial is more than 10 digits in length.
* refresh - How often slaves check in with their master
* retry - if the master is down, slaves will retry after this amount of time 
* expire - How long a slave will serve data after losing contact with the master.  After this time, the data will be considered too stale to provide to others.
* minimum - Sets the TTL to cache negative responses. Negative responses are those where a record is not found or does not exist.
** The default unit for the last 4 values is seconds.  Time may be appended as in the example below:  ''m''inutes, ''d''ays, or ''w''eeks.

Here's a sample SOA record:

{{{
@ IN  SOA ns1.merantn.ncs205.net. hostmaster.merantn.ncs205.net. ( 
 2023112200  ; serial number
 1d    ; refresh 
 5d    ; retry 
 2w    ; expire
 30m   ; minimum
)
}}}
''Note:''  The @ is a special shortcut that represents the name of the zone.

{{Warning{''Important note on the SOA serial number:''  This number ''must'' always increment when changes are made to the zone file.  Be very careful when doing so.  Typos here will cause you problems later.  You can use simple numbers for the serial number (eg, start with 1 and then increment + 1 after each change).  However, the ''best practice'' is to use the date you made the change in YYYYMMDD format followed by a two digit count.  This is represented above with the serial number {{Monospaced{''2023112200''}}}.  This format will provide a value that's always increasing while indicating the date when the zone was last updated.  ''Always'' update the date value to the current date when you're updating the zone and increase the count number by one if you're making an additional change for the day.  Reset the count to {{Monospaced{00}}} after updating the date to indicate the first change of the day.

__''If you do not update your serial number then zone changes will not propagate beyond your local DNS server!''__}}}



!!! NS - Nameserver record
 - identify name servers for the current zone and delegate sub-domains
{{{
		IN  NS  ns1.merantn.ncs205.net.
		IN  NS  ns5.ncs205.net.
}}}
''Note:''  If no record name is provided (the missing first column), it is inherited from the previous record.  In this case, the previous record was the @.

!!! A
 - Address records map a host name to an IP address
 - We can set a TTL value for a specific record
 - Useful if you are about to make a change
{{{
ns1		IN	A	192.168.12.26

test		IN	A	192.168.12.24
www		IN	A	192.168.12.25
core		IN	A	192.168.12.26
}}}

{{Note{''Note:''  A relative hostname may be used instead of entering the fully qualified domain name (FQDN).  If a host name does not end with a . (dot), then the name of the zone will be appended to it.}}}

For example, these two records are both functionally equivalent.  Notice the ''.'' at the end of the FQDN.
{{{
www				IN	A	192.168.12.25
www.merantn.ncs205.net.		IN	A	192.168.12.25
}}}


!!! CNAME
* An alias which will resolve to the canonical host then return its A record IP address
* depth limited to 6 aliases

These will all create CNAME records which will resolve to the core VM:
{{{
loghost		IN	CNAME	core
ntp		IN 	CNAME	core
directory	IN	CNAME	core
}}}
The same concept as above applies here.  The zone name will be appended to each host name which does not end with a dot.


!!! PTR
 - pointer records map from ip address back to host name.  These record types must ''only'' be used in //reverse// zones.  Reverse zones contain mappings of IP addresses to host names.  Do not put them in the same zone file as your forward DNS records!

{{Command{grep PTR /etc/named/master/merantn.ncs205.net.rev}}}
{{{
24 IN  PTR  test.merantn.ncs205.net.
25 IN  PTR  www.merantn.ncs205.net.
26 IN  PTR  core.merantn.ncs205.net.
}}}

!!! MX
* mail exchanger record identifies which systems to send mail to
* These are weighted by priority for multiple mail servers
* Must point to an A record host name
* Example:  {{Command{dig mx google.com}}}

!!! TXT
* create text string records in DNS
* Useful for publishing additional information about a zone or a host
* The text value may need to be enclosed in quotes


!!! These are only the primary record types!  There are many more.


Sources
 - http://dns.measurement-factory.com/surveys/openresolvers.html


! Assignment

<<tiddler [[Lab 59 - Bind config and zones]]>>
! Material

!! Crypto & Securing Communication:

!!! Reading:
Oddly, our textbook does not discuss SSL encryption with Apache.  The concepts we're applying to Apache are universal among services.  Only the implementations differ slightly.

Here are some alternate sources to review:

!!! Watch:
* Brief overview: https://www.youtube.com/watch?v=w0QbnxKRD0w
* Crypto overview: https://www.youtube.com/watch?v=AQDCe585Lnc
** The math behind asymmetric encryption: https://www.youtube.com/watch?v=YEBfamv-_do
* The TLS handshake: https://www.youtube.com/watch?v=cuR05y_2Gxc

!! Scheduled Tasks:

!!! Read:
* Linux Administration Chapter 9, pages 197-199 (cron) 

{{Warning{''Warning:'' The textbook is light on the details and only discusses cron from a user perspective.  There is not much mentioned about the system scheduled tasks saved in the files within {{File{/etc/}}}.  The notes below are more thorough.}}}


! Notes

Understanding the core concepts involved with securing network communication is important for a security practitioner.  The advent of [[Let's Encrypt|https://letsencrypt.org/]] and the free SSL certificates they offer has made trusted encryption available to the masses.  Prior to the Let's Encrypt project, a site operator had to pay a commercial certificate authority to issue a certificate for their site.  This added expense limited encryption to those with the time and budget to pay for it. 

We're now going to cover some core encryption concepts while implementing secure communication for our web sites.

!! Different concepts for different purposes
* ''Encoding'' - Data is transformed from one form to another.  Usually easily reversible and not secure.  
** base64 encoding to convert a binary file to text for transmission over email (a text-based medium)
*** The {{Command{base64}}} command will encode or decode a base64 encoded string
** audio or video encoding, employing a codec (coder-decoder)
* ''Hashing'' - Data is converted into a fixed-size string (a hash) using a non-reversible hash function.  The length of the hash is always the same, regardless of the amount of input data.
** Algorithms have evolved over time.  Current available algorithms are md5, sha-256, and sha-512.
** Used to secure passwords.  A hash of your password is stored on the system.  When you log in, the password you enter is hashed and compared to the hash stored on the system.
*** Salt: Extra data that is added to a password to ensure the same passwords do not have the same hash.  Significantly slows down brute force attacks.
*** Generate a password hash with perl: {{Command{ perl -e 'print crypt("PlaintextPassword","\$6\$hash-salt\$") . "\n"' }}}
**** {{Monospaced{//PlaintextPassword//}}} is the password you would like to hash
**** {{Monospaced{//hash-salt//}}} is the salt to use.  Salts should be valid random characters and ideally unique to each stored password.
** Used for integrity validation.  A hash of a file can be saved.  If the file changes, its hash will no longer match the stored copy.  
*** Commands to generate a hash of a file: {{Command{md5sum}}}, {{Command{sha256sum}}}, and {{Command{sha512sum}}}.
*** Hashes of your labs are stored in {{File{/opt/pub/ncs205/submit/checksums}}} when they are collected to ensure changes are not made after they are graded.
* ''Encryption'' - Use of an encryption algorithm and a secret key (cipher) to transform a private message (plain-text) into encrypted data (cipher-text).  Only those possessing the secret key can view the original message.
** Encryption is reversible with the encryption key

!! Main Encryption Goals:
* Confidentiality - Prevent disclosure to unauthorized parties
* Integrity - Prevent tampering of transmitted data
* Authenticity - Ensure communication is genuine and with the intended target

!! Encryption basics

There are two different types of encryption algorithms - symmetric and asymmetric.  It is important to understand the differences between them and where each is appropriate.

!!! Symmetric cryptography:
* Same key is used for both encryption and decryption of the message
* Also known as a shared secret
** {{Command{openssl aes-256-cbc -a -salt -in secretfile.txt}}}
** Secure file transfer with netcat and openssl
*** receiver#  {{Command{ nc -l 4444 | openssl aes-256-cfb -salt -d | tar -xvf - }}}
*** sender#  {{Command{ tar -cvf - file | openssl aes-256-cfb -salt -e | nc client 4444 }}}
* Pros:
** Fast
** Not resource intensive 
** Useful for small and large messages
* Cons:
** Key exchange must occur over a secure channel
** How can you exchange crypto keys over a secure channel that doesn't yet exist because you haven't exchanged keys yet?  Another chicken and egg problem.

!!! Asymmetric:
* Public key cryptography
* Two keys instead of one shared secret
** public key - available for everyone.  Can be published
** private key - kept secret and secure.  Typically locked with a passphrase.
* Data encrypted with one key can be viewed or verified by the other
** Can be used for encrypting or signing messages
* Pros:
** Safe key distribution over an insecure channel
* Cons:
** Slow
** More resource intensive
** Only useful for small messages


!!! Symmetric / Asymmetric Hybrid
* Use asymmetric encryption only to transmit a symmetric key.
* Then use symmetric encryption for the actual message.
* The best of both algorithm types:
** Can safely exchange key data
** Fast
** Not resource intensive
** Useful for small and large messages


!! Encryption Uses:

PGP / GPG
* Encrypt or sign files and messages
* Command line tools available:  {{Command{gpg}}}
SSH
* user keys for authentication instead of passwords
** Use ssh-keygen to generate keys
* host keys for encrypting communication between client and server

!!! SSL Certificates
* A SSL certificate contains information about the owner of the certificate and their public key
* Signed by a Certificate Authority (CA) to establish trust
** The Certificate Authority is //supposed// to take some steps to verify the site owner actually owns the site and they're only issuing certificates to the owners.
** Typically a commercial company like Godaddy, Verisign, or Entrust
** Or nonprofit CA to issue free certs:  https://letsencrypt.org/
** ~Self-signed certificates can be created without using an external entity, but they won't be trusted by default
* The CA's signature is added to the certificate to establish trust
** If you view a site's certificate in your web browser, you can see the chain of trust from the Certificate Authority to the site certificate.
* Certificate Verification occurs by matching the host name in the certificate to the host name in the network communication
** Host name in the URL must match the host name in the certificate (single host)
** Or use a wildcard certificate for many sites (*.sunyit.edu)


!!!! SSL Trust

CA certificate stores
* Hardcoded list of trusted root CA certificates in either the application or operating system
** Stored within {{File{/etc/pki/tls/certs/ca-bundle.crt}}} in ~CentOS
** {{Command{grep Issuer /etc/pki/tls/certs/ca-bundle.crt | less}}} to see whose CA certificates the OS is trusting
* Intermediate CA resellers
** These resellers are trusted by a root CA certificate to issue certs on their behalf
* Transitive trust
** A trusts B and B trusts C, thus A trusts C
* Web of trust instead of a direct chain
** There are so many trusted certificate authorities, a weak link in any one of them completely destroys the ability to truly trust any of them.
* Certificate Authority weaknesses
** Several Breaches at CA Intermediaries.  This sort of thing seems to happen a lot.
***[[DigiNotar (2011)|https://security.googleblog.com/2011/08/update-on-attempted-man-in-middle.html]] - Issued a wildcard certificate for google.  About 500 other fake certificates were issued.
** Bad actors
*** Man in the middle proxies
**** [[Gogo Serving Fake SSL Certificates to Block Streaming Sites|http://www.pcmag.com/article2/0,2817,2474664,00.asp]]
**** [[SSL/TLS Interception Proxies and Transitive Trust|http://www.secureworks.com/cyber-threat-intelligence/threats/transitive-trust/]]
*** Malware
**** [[Lenovo's Superfish|http://www.slate.com/articles/technology/bitwise/2015/02/lenovo_superfish_scandal_why_it_s_one_of_the_worst_consumer_computing_screw.html]]
*** [[Microsoft Blacklists Fake Finnish Certificate|http://yro.slashdot.org/story/15/03/18/2048244/microsoft-blacklists-fake-finnish-certificate]]
*** [[Chinese CA issues Google certificates|https://googleonlinesecurity.blogspot.com/2015/03/maintaining-digital-certificate-security.html]]
*** [[Thawte issues certs for domains it doesn't own|http://www.itworld.com/article/2999145/security/google-threatens-action-against-symantec-issued-certificates-following-botched-investigation.html]]
*** [[Researchers find, analyze forged SSL|http://www.net-security.org/secworld.php?id=16843]]
** Certificate revocation problems
*** The mechanisms in place to get the word out that an issued certificate cannot be trusted aren't very robust.
* Leaks or vulnerabilities - Many ways to attack the infrastructure
** [[Heartbleed|https://en.wikipedia.org/wiki/Heartbleed]] - Broke encryption for almost the entire Internet.  For two years anyone could obtain a server's encryption key.
** [[Poodle|https://www.openssl.org/~bodo/ssl-poodle.pdf]]
** Weak ciphers and downgrade attacks
** There are too many other examples

!!!! Countermeasures for the weaknesses in the trust system:
* Public key pinning
** Lock a certificate to a specific CA
** http://thenextweb.com/apps/2014/09/02/firefox-32-arrives-new-http-cache-public-key-pinning-support-easy-language-switching-android/
** https://wiki.mozilla.org/SecurityEngineering/Public_Key_Pinning
** https://raymii.org/s/articles/HTTP_Public_Key_Pinning_Extension_HPKP.html

* ~DNS-based Authentication of Named Entities  (DANE)
** Remove the ~CAs from the process
** Use DNS to authenticate certificates much like SSH fingerprint records
** Add a ''tlsa'' resource record to your DNS zone
** Move from web of trust to chain of trust like DNSSEC
** Since DNS is totally open, if something is compromised it should be detectable
** Easy to revoke certificates
** https://www.huque.com/bin/gen_tlsa
** [[DerbyCon Video|http://www.irongeek.com/i.php?page=videos/derbycon4/t404-dns-based-authentication-of-named-entities-dane-can-we-fix-our-broken-ca-model-tony-cargile]]

* Online Certificate Status Protocol - Obtain revocation status of a certificate
* Hardcoded blocklists in web browsers


!! Web Encryption

!!! Background

HTTP encapsulated with TLS
 - TLS = Transport Layer Security
 - Replacement for SSL protocol
This is an encryption layer on top of HTTP
 - Used to authenticate the server and encrypt the communication

Use Hybrid encryption
* Encryption algorithms are decided by the browser and the server
* The most secure method available to both is used
* Symmetric encryption is used for the transaction
* But we need to safely share the key
** Asymmetric crypto is used to send the key
** The server's public key is stored inside the site's certificate

HTTPS Handshake:
* Browser initiates the connection
* Server responds with its certificate 
* Browser advertises its encryption methods and sends a symmetric session key encrypted with the server's public key
* Server decides which cipher to use
* Server and client use this session key for symmetric encryption of the data
** Forward Secrecy - key agreement protocols which ensure a compromise of private keys will not lead to a compromise of past session keys.  This helps mitigate future SSL attacks.

Public key crypto is only used to establish faster symmetric encryption


!!! Implementation

Let's set all of this up to secure communication to our web server.

''Note:''  The examples below contain my username, IP address, and DNS records.  Be sure to replace them with your values.
  

!!!! Key generation
* Save on your web VM within the directory {{File{/etc/pki/tls/}}}
** This is the standard system directory for keys and certificates

----
!!!!! There are two ways to create SSL keys and certificates:
A) Manually:
- ''Do not do this'' for your web server ~VMs; this is only here for informational purposes.  We will be using the automated process below.

* SSL Keys
** Create and secure a SSL key with a passphrases
** Create key: {{Command{ openssl genrsa -aes256 -out www.//username//.key 2048 }}}
** Verify key: {{Command{ openssl rsa -noout -text -in www.//username//.key }}}
* Certificate Signing Request (CSR)
** The CSR will collect the server information.  It would be sent to a commercial certificate authority who will then create and sign the certificate.
** Create CSR: {{Command{ openssl req -new -sha256 -key www.//username//.key -out www.//username//.csr }}}
** Verify CSR: {{Command{ openssl req -noout -text -in www.//username//.csr }}}
* Certificate Authority (CA)
** Either commercial or self-signed
** Commercial - send the CSR file to the CA
** ~Self-Signed:
*** Become an untrusted certificate authority yourself:
**** Create CA Key: {{Command{ openssl genrsa -aes256 -out ncs205CA.key 4096 }}}
**** Create CA Cert: {{Command{ openssl req -x509 -new -sha256 -key ncs205CA.key -out ncs205CA.crt -days 18250 }}}
**** Sign a certificate:  {{Command{ openssl x509 -req -in www.merantn.csr -CA ncs205CA.crt -~CAkey ncs205CA.key -~CAcreateserial -out www.merantn.crt -days 735 }}}
**** Display the new certificate: {{Command{ openssl x509 -noout -text -in ncs205CA.crt }}}

Whoever controls a system's certificate store controls secure communication to that system!  If I create a CA certificate and can add it to your system, any certificates I issue will be trusted.  Man in the middle attacks would not display the normal "Untrusted site" warnings in your browser unless the site operator has deployed countermeasures.

'' - or - ''

B) ''Automated:'' Using Let's Encrypt and {{Command{acme.sh}}} to generate SSL certificates
 - Use this method to create an SSL certificate for your web servers

A trusted SSL certificate must only be issued to someone who can prove they own the domain. 

Let's Encrypt and the {{Command{acme.sh}}} tool can obtain this verification either by checking for a small file placed within the ~DocumentRoot of your website or checking DNS for a particular record.  Only someone who actually owns the domain should be able to perform these tasks.  Normally, the first method is utilized because it is easy to automate.  This is not an option for us because our web servers are on a private network which cannot be accessed directly from the internet.  The Let's Encrypt servers will not be able to access our web servers to verify them.  Our only option is DNS verification because you control your DNS servers and the DNS records are available outside of our lab environment.

{{Warning{''Warning:'' Let's Encrypt certificates are only valid for 90 days and must be continually renewed.  An automated mechanism must be in place to ensure the certificates are renewed prior to expiration.  A scheduled task must be created to renew the certificates before they expire.  This isn't a concern for us since the class will end before these certificates expire.  The [[acme.sh documentation|https://acme.sh]] will walk you through how to set up this scheduled task.}}}


You cannot proceed if your DNS server is not yet set up properly.  Ensure an external test properly returns an IP address before continuing with SSL configuration.  Replace my username with yours everywhere it appears throughout these instructions.
{{{
[merantn@core ~]$ dig www.merantn.ncs205.net @1.1.1.1

; ; <<>> ~DiG 9.11.4-P2-~RedHat-9.11.4-9.P2.el7 ; <<>> www.merantn.ncs205.net @1.1.1.1
;; global options: +cmd
;; Got answer:
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 59587
;; flags: qr rd ra; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 1

;; OPT PSEUDOSECTION:
; EDNS: version: 0, flags:; udp: 1452
;; QUESTION SECTION:
;www.merantn.ncs205.net.                IN      A

;; ANSWER SECTION:
www.merantn.ncs205.net. 249     IN      A       192.168.12.25

;; Query time: 71 msec
;; SERVER: 1.1.1.1#53(1.1.1.1)
;; WHEN: Mon Apr 27 04:43:32 EDT 2020
;; MSG SIZE  rcvd: 89
}}}


Complete the following steps to use {{Command{acme.sh}}} to generate an SSL certificate for your web server:

{{Warning{''Warning'': Getting this all set up is a well choreographed dance.  All of the pieces need to fall together precisely in order for it to work.  Once it's set up, it'll run flawlessly for years.  Go slow, pay attention, and mind the typos.  Copy & paste as much as you can.  Typing all of this out is asking for pain and punishment.  

Replace my username and IP addresses where you see them.  Copy the large commands from this page to notepad, make the edits, and then paste the commands into the shell.  Be sure to review everything closely before executing.}}}

On your DNS server:
# Add a CAA ([[Certification Authority Authorization|https://letsencrypt.org/docs/caa/]]) record to your zone file.  A CAA DNS record indicates which Certificate Authorities are allowed to issue certificates for the domain.
# Add a CNAME record resembling the following to your forward zone:
** {{Monospaced{_acme-challenge.www	IN	CNAME	//username//.acme.ncs205.net.  }}}
** Be sure to replace //username// with your username
** See my full zone below for an example.
# Increment your SOA serial number and reload your zone file with {{Command{rndc reload}}}
** Notice the serial number below has changed from the last time I posted my zone file.  Do not omit this crucial step.

My full zone file:
{{{
[root@core ~]# cat /etc/named/master/merantn.ncs205.net.fwd
$TTL 5m
@ IN  SOA ns1.merantn.ncs205.net. hostmaster.merantn.ncs205.net. (
 2023112900     ; serial number
 1d    ; refresh
 5d    ; retry
 2w    ; expire
 30m   ; minimum
)
                IN      NS      ns1.merantn.ncs205.net.
                IN      NS      ns5.ncs205.net.
                IN      CAA 128 issue "letsencrypt.org"


ns1             IN      A       192.168.12.26

test            IN      A       192.168.12.24
www             IN      A       192.168.12.25
core            IN      A       192.168.12.26

loghost         IN      CNAME   core
ntp             IN      CNAME   core
directory       IN      CNAME   core

_acme-challenge.www     IN      CNAME     merantn.acme.ncs205.net.
}}}

* Don't forget to replace my username with yours everywhere it appears.

Wait for DNS to propagate and you're able to verify your records exist in local and external DNS before proceeding.  These two commands can be used to verify that the DNS records are ready.  We see validation here for //my// DNS records both locally and globally.

{{Commands{
''1)'' Local check:
[root@core ~]#  ''dig _acme-challenge.www.merantn.ncs205.net CNAME @localhost +noall +answer''

&#59; &lt;&lt;&gt;&gt; ~DiG 9.11.4-P2-~RedHat-9.11.4-26.P2.el7_9.4 &lt;&lt;&gt;&gt; _acme-challenge.www.merantn.ncs205.net TXT @localhost +noall +answer
&#59;&#59; global options: +cmd
@@_acme-challenge.www.merantn.ncs205.net. 300 IN CNAME merantn.acme.ncs205.net.@@

''2)'' External DNS check against the ~CloudFlare DNS server at 1.1.1.1
[root@core ~]# ''dig _acme-challenge.www.merantn.ncs205.net CNAME @1.1.1.1 +noall +answer''

&#59; &lt;&lt;>> ~DiG 9.11.4-P2-~RedHat-9.11.4-26.P2.el7_9.4 &lt;&lt;&gt;&gt; _acme-challenge.www.merantn.ncs205.net TXT @1.1.1.1 +noall +answer
&#59;&#59; global options: +cmd
@@_acme-challenge.www.merantn.ncs205.net. 300 IN CNAME merantn.acme.ncs205.net.@@

}}}
You should see the same CNAME record value returned by both queries.  __If you don't see any output in the second command, then you likely either forgot to properly increment your serial number or reload your zone.__

{{Note{''Note:'' I only added the ''+noall'' and ''+answer'' options to ''dig'' to trim the output so it's easier to post here.  You don't have to use these options in your checks.  Omitting them will yield more details which are often very useful.}}}

Once the CNAME records are fully in place and verified, complete the following steps on your web server VM:


On your web VM:
# Download the {{Command{acme.sh}}} shell script and save it to {{File{/usr/local/sbin/}}} on your web server VM
## {{Command{wget -O /usr/local/sbin/acme.sh https://raw.githubusercontent.com/acmesh-official/acme.sh/master/acme.sh}}}
## Be sure to make the file executable
# Download the ~CloudNS API plugin
## {{Command{mkdir -p /root/.acme.sh/dnsapi/}}}
## {{Command{wget -O /root/.acme.sh/dnsapi/dns_cloudns.sh https://raw.githubusercontent.com/acmesh-official/acme.sh/master/dnsapi/dns_cloudns.sh}}}
# Define the API credentials
** We will be performing DNS verification via API.  These two commands will save the API username and password to the shell environment for the acme.sh script to retrieve. 
## {{Command{export ~CLOUDNS_SUB_AUTH_ID="//username//"}}}
## {{Command{export ~CLOUDNS_AUTH_PASSWORD="//password//"}}}
## Verify the username and password variables:  {{Command{ echo $~CLOUDNS_SUB_AUTH_ID - $~CLOUDNS_AUTH_PASSWORD}}}.  You should see them repeated back to you.
**  The actual username and password will be posted to the Discord channel for this week's material.
# Begin the authorization process for a certificate for your web server from Let's Encrypt.  ''Be sure to replace my username with yours everywhere it appears''
## Perform a test of the SSL certificate issue:  {{Command{/usr/local/sbin/acme.sh &#045;-server letsencrypt &#045;-issue -d www.merantn.ncs205.net &#045;-domain-alias merantn.acme.ncs205.net &#045;-log &#045;-dns dns_cloudns &#045;-dnssleep 60 &#045;-test}}}

If the certificate is properly issued, you should see it displayed to the screen followed by similar text:
{{Monospaced{
&#045;&#045;&#045;&#045;&#045;END CERTIFICATE&#045;&#045;&#045;&#045;&#045;
[Thu Apr 15 00:38:10 EDT 2021] Your cert is in  /root/.acme.sh/www.merantn.ncs205.net/www.merantn.ncs205.net.cer
[Thu Apr 15 00:38:10 EDT 2021] Your cert key is in  /root/.acme.sh/www.merantn.ncs205.net/www.merantn.ncs205.net.key
[Thu Apr 15 00:38:10 EDT 2021] The intermediate CA cert is in  /root/.acme.sh/www.merantn.ncs205.net/ca.cer
[Thu Apr 15 00:38:10 EDT 2021] And the full chain certs is there:  /root/.acme.sh/www.merantn.ncs205.net/fullchain.cer
}}}

The {{Monospaced{&#045;-test}}} option at the end of our last command means this is only a test run of the process.  We must ensure everything works before requesting the real certificate.  If things aren't set up correctly and you request the real certificate too many times, you'll end up blocked by the validation servers.

If the previous command completed successfully, run it again without the {{Monospaced{&#045;-test}}} option at the end.  You may also need to add a {{Monospaced{-f}}} option to force it.

# Use {{Command{yum}}} to install the package {{Monospaced{''mod_ssl''}}} on your web VM.  This is the Apache extension which will provide encryption
# Edit the Apache SSL configuration file, {{File{/etc/httpd/conf.d/ssl.conf}}}
## Search for the ''~SSLCertificateFile'' directive and change the path to {{File{/etc/pki/tls/certs/www.merantn.ncs205.net.cer}}}
*** This file contains our server's public key
## Search for the ''~SSLCertificateKeyFile'' directive and change the path to {{File{/etc/pki/tls/private/www.merantn.ncs205.net.key}}}
*** This file contains our server's private key
## Search for the ''~SSLCertificateChainFile'' directive and change the path to {{File{/etc/pki/tls/certs/www.merantn.ncs205.net.fullchain.cer}}}
*** This file contains the intermediate certificates.  Our browser contains the Root CA certificate.  Including the intermediate certificate in our bundle completes the trust chain.
*** This configuration directive may need to be uncommented.
# Install the certificates to their proper place on the system and restart Apache:
** {{Command{/usr/local/sbin/acme.sh &#045;-install-cert -d www.merantn.ncs205.net &#045;-log &#045;-cert-file /etc/pki/tls/certs/www.merantn.ncs205.net.cer &#045;-key-file /etc/pki/tls/private/www.merantn.ncs205.net.key &#045;-fullchain-file /etc/pki/tls/certs/www.merantn.ncs205.net.fullchain.cer &#045;-reloadcmd "/usr/sbin/apachectl restart" }}}
# Verify Apache is running: {{Command{systemctl status httpd}}}
# Verify the apache config: {{Command{apachectl configtest}}}
** This will identify any errors that might have been introduced.
** It's always wise to validate your configuration before restarting a service.  If you don't and there's a problem, your service will be offline while you sort it out.
** Not a big deal here, but this will be a big deal in the future when downtime costs money and might get you fired.
----

View the server certificate chain:  {{Command{ true | openssl s_client -connect www.merantn.ncs205.net:443 -showcerts}}}
 - Replace my username with yours.
 - You should see ''Verify return code: 0 (ok)'' near the bottom.  This return code means a valid certificate was presented and the connection is fully trusted.
 - You can also run that command with my username to see what the expected output should look like

Verify your SSL certificate is fully trusted with {{Command{curl}}}.  Notice the certificate information and dates in the connection details.  Be sure to use the ''https'' protocol for an encrypted connection:
{{{
[root@www ~]# curl -v https://www.merantn.ncs205.net/
* About to connect() to www.merantn.ncs205.net port 443 (#0)
*   Trying 192.168.12.25...
* Connected to www.merantn.ncs205.net (192.168.12.25) port 443 (#0)
* Initializing NSS with certpath: sql:/etc/pki/nssdb
*   CAfile: /etc/pki/tls/certs/ca-bundle.crt
  CApath: none
* SSL connection using TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384
* Server certificate:
*       subject: CN=www.merantn.ncs205.net
*       start date: Nov 25 23:34:06 2023 GMT
*       expire date: Feb 23 23:34:05 2024 GMT
*       common name: www.merantn.ncs205.net
*       issuer: CN=R3,O=Let's Encrypt,C=US
> GET / HTTP/1.1
> User-Agent: curl/7.29.0
> Host: www.merantn.ncs205.net
> Accept: */*
> 
< HTTP/1.1 200 OK
< Date: Sun, 26 Nov 2023 00:36:29 GMT
< Server: Apache/2.4.6 (CentOS) OpenSSL/1.0.2k-fips PHP/7.4.33
< Last-Modified: Thu, 02 Nov 2023 06:09:17 GMT
< ETag: "56-609253a55fa28"
< Accept-Ranges: bytes
< Content-Length: 86
< Content-Type: text/html; charset=UTF-8
< 
<HTML>
<BODY>
<BR><BR><BR>
<center><B>Welcome to NCS205!</B></center>
</BODY>
</HTML>
* Connection #0 to host www.merantn.ncs205.net left intact
}}}


When an SSL certificate is deployed, connections must be by hostname because that is the primary mechanism to verify that the SSL certificate is valid.  Connections by IP address or localhost will return verification errors.  
 - Look in the {{Command{curl}}} output above for the certificate ''Subject:'' field, {{Monospaced{subject: CN=www.merantn.ncs205.net}}}.  The hostname in the certificate subject field must match the hostname used by curl or your browser to access the site.

We can see the verification errors if something other than the hostname used in the certificate is used to access the website:
{{{
[root@www ~]# curl https://localhost/
curl: (51) Unable to communicate securely with peer: requested domain name does not match the server's certificate.

[root@www ~]# curl https://192.168.12.25/
curl: (51) Unable to communicate securely with peer: requested domain name does not match the server's certificate.
}}}


!!! Helpful sites:
* Test server SSL quality:  http://www.ssllabs.com/
* https://mozilla.github.io/server-side-tls/ssl-config-generator/
* https://cipherli.st/

!!! Additional Reading

https://www.schneier.com/blog/archives/2010/07/dnssec_root_key.html
Implementation http://www.tldp.org/HOWTO/SSL-Certificates-HOWTO/x64.html
SSL/TLS Details: http://en.wikipedia.org/wiki/Transport_Layer_Security
[[Bulletproof SSL & TLS|https://www.feistyduck.com/books/bulletproof-ssl-and-tls/]]


!! Scheduled tasks

Any system will contain jobs which must be run at some sort of periodic interval.  They can be regular maintenance tasks common to all Unix systems or unique tasks custom to a specific server.

Log file rotation is an example of a regularly scheduled task common to all systems.  Every day, cron executes the {{Command{logrotate}}} command to ensure log files do not accumulate indefinitely.  Log files are renamed and sometimes compressed according to the schedule outlined in its configuration file {{File{/etc/logrotate.conf}}}.  Very old log files will be deleted once their retention period has expired.

The collection of your PDF labs is an example of a unique task custom to our class shell server.  A script is executed every hour which collects your new labs from the {{File{/opt/pub/ncs205/submit/}}} directory and copies them to my grading queue.  Here is the cron job which makes that happen:
{{{
[root@shell ~]# cat /etc/cron.d/lab-collect
# Example of job definition:
# .---------------- minute (0 - 59)
# |  .------------- hour (0 - 23)
# |  |  .---------- day of month (1 - 31)
# |  |  |  .------- month (1 - 12) OR jan,feb,mar,apr ...
# |  |  |  |  .---- day of week (0 - 6) (Sunday=0 or 7) OR sun,mon,tue,wed,thu,fri,sat
# |  |  |  |  |
# *  *  *  *  * user-name  command to be executed

0    *  *  *  * root       /opt/rsl/collect.sh
}}}


Tasks may be scheduled using these two services
* {{Monospaced{cron}}} - run periodically at a specified interval.  This system is for tasks which will run regularly.
* {{Monospaced{at}}} - run once at a scheduled date & time.  This system is for tasks which are one-offs.

!!! More examples of tasks to schedule at a regular interval:
* SSL certificate renewal
* Daily reports
* Garbage collection (remove old or temporary files to free disk space)
* Vulnerability checks
* System updates
* Source code updates
* {{Command{mysqldump}}} - database backups
** create a read-only mySQL account
** save backup to {{File{/opt/work/backups/}}}
* System backups

* Or, using {{Command{at}}}, scheduling a one-time job for a more convenient time
** ie: something that may be bandwidth or CPU intensive.


!!! cron - run periodically
* {{Command{crond}}}
** A service that is installed with the OS and running by default
** Started automatically on system boot 
** Permission for regular users to use this tool is granted or revoked via the {{File{/etc/cron.{deny,allow&#125;}}} files
* System cron configuration files & directories:
** {{File{/etc/crontab}}} file - The main configuration file.  Each line in this file is a job to run at the specified schedule
** {{File{/etc/cron.d/}}} directory - Where to put individual files containing single or related jobs. Separate files are easier to maintain in an automated fashion.
** {{File{ /etc/cron.{hourly,daily,weekly,monthly} }}} - Scripts placed in these directories will be executed on that interval

* User cron configuration: 
** {{Command{crontab}}} command for accessing scheduled ''user'' jobs
** {{Command{crontab}}} {{Monospaced{[-e|-l]}}} - edit or list the ''user'' cron jobs
** User cron config files are stored in {{File{/var/spool/cron/}}} if you'd like to review them

{{Note{''Note:'' 
 - Scheduled tasks related to the function of the system or its services should be saved in the cron configuration files within {{File{/etc/}}}.  
 - Scheduled tasks which are only for a specific user (generally a non-root user) should be saved using the {{Command{crontab -e}}} command. }}}


!!!Crontab file format
* See {{File{/etc/crontab}}}
* Declare variables (if you need to) - Shell variables can be set to define items such as the $PATH to use for executables and the user account to email any results to.
** The defaults are reasonable.  You generally only need to set variables to make changes to the defaults or define something not already set.
* Output optionally sent to the owner via email (on by default) 
* Command execution fields:
** Time to execute command, (minute, hour, day, month, weekday) 
** User to run as (This can only be specified when running from a system crontab configuration file)
** Command string to execute
*** It's a best practice to use the full, absolute path to the command you want to execute.
* Special time nicknames:
** @reboot
** @daily
** Complete list is available in the cron man page
* Special time formatting:
** */2 : Every 2 hours, months, days, etc
** 1-4 : Range, for example from 1 to 4
** 1-6/2 : Every other between 1 and 6 
** 2,5 : Multiple, for example 2 and 5

!!! Cron man pages:
* Check man page for the {{Command{cron}}} command
* Notice the ''See Also'' section at the bottom where {{Command{crontab}}} is listed in two different manual sections
* {{Command{man man}}} - will describe how to access different manpage sections

The website https://cron.help/ is also a good resource for validating your cron scheduling.

!!! Troubleshooting cron
* Logging on Linux does a great job providing useful information to help troubleshoot issues
** Most services write to a log file somewhere below the directory {{File{/var/log/}}}
* The cron log file will be a good resource - {{File{/var/log/cron}}}
* Be sure to include the last few lines of this log file if you need to reach out for help


!!! at - run once
* System dependent:
** ~FreeBSD - The atrun utility is executed via cron every 5 minutes to check for and run {{Command{at}}} scheduled tasks
** ~CentOS - atd - Daemon running in the background for processing tasks scheduled via the {{Command{at}}} utility
** This is not installed or running by default (on ~CentOS)
** It must be set to start on boot and be manually started after installation, just like any other new service we add.
* {{Command{at}}} user command to add jobs.  Run it with the time the task should execute as an argument
** flexible time formatting
*** {{Command{ at +15 minutes }}}
*** {{Command{ at 4:15 }}}
*** {{Command{ at 4pm tomorrow }}}
*** {{Command{ at 4pm October 15 }}}
* Display scheduled job with {{Command{at -c}}}
** Scheduled jobs stored in {{File{/var/spool/at/}}} files
** {{Command{ atq }}} - display scheduled at jobs
** {{Command{ atrm }}} - remove scheduled at job
** Can use {{File{/etc/at.{allow,deny&#125;}}} files to control access to this utility

!!!! Examples:
For our recent security competition, I wanted to lock access to a system for lunch and re-enable access after the lunch break ended:

{{{
root@vce1:~# at 12pm
warning: commands will be executed using /bin/sh
at> pct stop 1215
at> <EOT>
job 2 at Sat Apr 9 12:00:00 2022

root@vce1:~# at 1pm
warning: commands will be executed using /bin/sh
at> pct start 1215
at> <EOT>
job 3 at Sat Apr 9 13:00:00 2022

root@vce1:~# atq
3      Sat Apr 9 13:00:00 2022 a root
2      Sat Apr 9 12:00:00 2022 a root
}}}


! Assignment

<<tiddler [[Lab 60 - SSL Certificates]]>>

<<tiddler [[Lab 61 - Scheduled Tasks]]>>
! Material

!! Read:
* Linux Administration Chapter 6 - Managing users & Groups
* [[sudo tutorial|https://phoenixnap.com/kb/linux-sudo-command]]
* [[sudoedit tutorial|https://www.howtoforge.com/tutorial/how-to-let-users-securely-edit-files-using-sudoedit/]]

!! Watch:
* {{Command{sudo}}} use and configuration:  https://www.youtube.com/watch?v=YSSIm0g00m4
** Note: In the video, the sudoers file is edited by executing {{Command{sudo visudo}}}.  This may not work on our ~VMs.  Instead use {{Command{su}}} to become root and then run {{Command{visudo}}} to edit the sudo configuration.


! Notes - Access control & user management

''Authentication'' - Who you are.  The process of ascertaining that someone is actually who they claim to be
''Authorization'' - What you are allowed to do.  Rules to determine who is allowed to perform certain tasks

!! Access control

!!! From the beginning unix maintained a multi-user system
* All objects (files & processes) have owners
* A user owns new objects they create 
* The administrative user (root) can act as the owner of any object
* Only root can perform most administrative tasks

!!! Groups

A mechanism to grant permissions to groups of users, such as all students in a particular class.

* The filesystem has a more sophisticated access control system
* Each file has a user owner and a group owner
* Permissions can be set so group members may have their own set of access controls (rwx)
* Groups can be harnessed to control access to the system

The directory {{File{/opt/pub/ncs205/}}} is set so only those in this class can access its files.


!!! root (uid 0)
* The root user is the standard unix superuser account
* There's nothing special about the user name - it's all in the user ID (UID) number
** Unix systems track everything by number:  process ~IDs, device ~IDs, IP addresses, uid, and gid
** We prefer names over numbers
* Check out the {{File{/etc/passwd}}} on your ~VMs.  There's a second uid 0 user account named {{Monospaced{toor}}}
** An unknown uid 0 backdoor account would normally be a huge red flag.  But this account is so I can get into your systems to help if something breaks.
** It has the same privileges as your {{Monospaced{root}}} account, but uses a password that I have.
** As far as the system is concerned, {{Monospaced{root}}} & {{Monospaced{toor}}} are the same person because they have the same user uid number.

!!! Privilege separation 
* superuser (uid 0) - The superuser - ideally only use the system with superuser privileges when necessary.
* normal users - the regular users on the system.  How we all access and use the class shell server.
* service accounts - These are the accounts our services run as, such as the {{Monospaced{apache}}} and {{Monospaced{mysql}}} users on your web server or {{Monospaced{named}}} user on your core VM.
** nobody or daemon accounts
*** Generic unprivileged accounts which run services as unprivileged users in case the services are broken into.  This way they'll have very limited access to the rest of the system
*** Services ran as root in the old days.  If a service was exploited and an attacker was able to access files or run commands, they would then have access to the entire system.
** principle of least privilege - Only grant users the access they need.  If an account or service is broken into, the damage will be limited.
*** This is why we don't all access the shell server as {{Monospaced{root}}}.  I use an unprivileged user also and only elevate to {{Monospaced{root}}} when necessary.

!!! Privilege escalation
* Limit direct access to the {{Monospaced{root}}} account.  
* Privilege separation - Only obtain superuser privileges when you need them
** Don't always operate as the {{Monospaced{root}}} user
* Instead log in as a regular user and escalate when needed
** This is also good for accountability if many users have the root password.
** {{Command{su}}} command - Substitute user
*** Change the effective userid to another system user
*** Real id is the userid you log in as, the user id associated with the process that created the current process
*** Effective id is one the system uses to determine whether you have access to a resource
*** http://bioinfo2.ugr.es/OReillyReferenceLibrary/networking/puis/ch04_03.htm
*** {{Command{su [username]}}}  - Change to another user, inheriting the current shell environment
*** {{Command{su - [username]}}}  - Change to another user, simulating a full login.  The current shell environment will not be inherited.
** {{Command{sudo}}} - Allow elevated privileges on a limited scale (per command).  
*** {{Command{sudo}}} Allows an administrator to grant root privileges to users without divulging the root password.
*** Or allow a user to just run a few commands as the superuser.
*** Display what you are allowed to access via sudo: {{Command{sudo -l}}}
*** {{Command{sudo //command//}}} - Run a command as another user (defaults to the root user)
*** {{Command{sudoedit //file//}}} - Edit a file as another user (defaults to the root user).  Running {{Command{sudoedit //filename//}}} is the same as running {{Command{sudo -e //filename//}}}.
*** {{Command{sudo -l}}} - Display which commands are available to the current user via sudo
*** sudoers file: {{File{/etc/sudoers}}} - This is where the sudo configuration is saved.  Don't edit this file directly.  Use {{Command{visudo}}} to edit it.
**** Separate sudo configuration files can also be saved within the directory {{File{/etc/sudoers.d/}}} to keep things better organized.
**** {{Command{visudo}}} will lock the file and perform syntax checks after saving it.
** You can control who can access particular resources with user or group permissions
* Both {{Command{su}}} and {{Command{sudo}}} will log escalation events
** su will log when an unprivileged user switches to another user
** sudo/sudoedit will log each command executed or file modified
* setuid bit 
** set ID upon execute
** An extra permission bit that can be set with chmod 
** The program will run as the user who owns the file.
** Examples:  passwd and crontab commands
** The passwd command needs extra privileges in order to change a user's password, so extra system privileges are granted just to that command.

!!! Finer grained access controls
* ~SELinux and mandatory access controls (MAC)
** Enabled by default in ~CentOS
** ~SELinux will cause us problems if we don't either configure or disable it
** Controlled by the {{Command{setenforce}}} command for current boot
** and by the /etc/selinux/config file on boot
** It's presently disabled on all of our class ~VMs
* Filesystem access control lists (~ACLs)
** Finer grained per user access to files
** Controlled by {{Command{setfacl}}} and displayed by {{Command{getfacl}}}
** Active ~ACLs noted with a + at the end of the file permissions list

!!! Verifying users with PAM
* Pluggable Authentication Modules (Chapter 6, Page 125)
* Configuration resides in /etc/pam.d/
* Originally access was determined by just checking passwords against the password files
* Modules are used for user validation and verification
** Can determine who you are
** And if you have permission to access the resource
** Can also enable additional types of authentication, such as two-factor with hardware or soft tokens.
* Examples: 
** {{File{/etc/pam.d/su}}} - limit who can use the {{Command{su}}} command
*** uid 0 users can always run the {{Command{su}}} command
*** Change to require wheel group membership
*** Can set to implicitly trust members of the wheel group (dangerous!)
* Other pam functions: 
** Pam can also create home directories on first login with pam_mkhomedir
** Check password complexity with pam_crack
** Lock accounts on too many failed attempts with pam_tally or pam_faillock


!! Users and Groups

!!! Password files
* {{File{/etc/passwd}}} - Everyone can read this file
** Contains fields identifying the user
** It used to also contain the hashed password but this was moved elsewhere to hide it from normal users
** Don't leave the old password field (position 2) blank!  If blank, no password is required for login.  Use the placeholder character {{Monospaced{''x''}}}.
*** An {{Monospaced{''x''}}} in {{File{/etc/passwd}}} column 2 means see {{File{/etc/shadow}}} for the password hash
* {{File{/etc/shadow}}} (Linux) or {{File{/etc/master.passwd}}} (~FreeBSD) - Only root can read this file
** A secure file which contains the password hashes so normal users cannot read them for brute force cracking
** Also contains password and account expiration attributes
** More detail on this file and its fields can be found at https://linuxize.com/post/etc-shadow-file/
* Use {{Command{vipw}}} to edit these files so you have file locking and format verification
** This verification prevents errors from breaking access to the system
** {{Command{vipw}}} will edit the password file
** {{Command{vipw -s}}} will edit the shadow file
* password hashing:
** Sample password hash: {{Monospaced{ $6$hA6IJImd$~TCWDXE6zeHgRYKBNAG2jqHNMyPp9FCW2KdlVFKGWto9BcV9chEjCX3zZAzxx5tqbKn3wve13VWLD8Vb5O214x1 }}}
*** The full hash has three components, separated by the {{Monospaced{$}}} delimiter:  Algorithm type, salt, and hashed password
** Different hashing algorithms and their tags from old (weak) to new (strong):  DES, ~MD5 ({{Monospaced{$1$}}}), Blowfish ({{Monospaced{$2a$}}}), ~SHA256 ({{Monospaced{$5$}}}), ~SHA512 ({{Monospaced{$6$}}})
*** The tag at the beginning of the password hash identifies the algorithm used.
** {{Command{authconfig &#045;-test | grep hash}}} - See what hashing algorithm is used by default on your system
** {{Command{authconfig &#045;-passalgo=md5 &#045;-update}}} - Change the default hash type (don't actually run this)
** {{File{/etc/sysconfig/authconfig}}} - Authentication configuration settings
** {{File{/etc/libuser.conf}}}
** salting
*** Randomize hashes by adding a salt to the password before hashing
*** Prevents identical passwords from having the same hash
*** Increases difficulty for brute force attacks or hash lookup tables (rainbow tables), since now a potential password value has to be tested for each possible hash value.
** Password cracking:
*** John the Ripper
*** hashcat
*** GPU processing makes this all much faster now, especially for weak algorithms and passwords
*** Protect your hashes!

{{Note{''Note:'' On page 116, when the book discusses the {{File{/etc/shadow}}} file, they incorrectly refer to the password as //encrypted//.  Passwords are //hashed//, not //encrypted//.  There's a difference.  
* Hashing is a one-way function.  Once plaintext is hashed it cannot be converted back to plaintext.  We validate passwords by hashing the password someone provides at login and comparing that hash to the one that's stored.
* Encryption is a two-way function.  If plaintext is encrypted, it can later be decrypted and turned into plaintext again, if you have the key.  We briefly reviewed encryption with the web servers.  Your browser negotiates an encrypted channel with the web server.  They each send encrypted data that the other decrypts with the proper key.

The subtle difference may not matter for normal Linux users, but it's important for a security practitioner to understand the difference between hashing and encryption.
}}}


Time it takes to brute force these password types:
[img[img/2023_Password_Table.jpg]]


* uid numbers 
** multiple users with same UID number - The system only cares about the number.  If multiple users have the same UID number, then they are effectively the same user and can access each other's files
** System accounts (UID < 10)
** Service accounts (~UIDs between 10 and 500)
** Users UID > 500 (Linux) - Regular users.

!!! Group file
* {{File{/etc/group}}} - Where groups and group memberships are defined.
** wheel group - special administrator group.  Usually allows extra system access

!!! Shell
* default shell : {{Command{/bin/bash}}} (Linux) or {{Command{/bin/tcsh}}} (BSD)
* lockout shell : {{Command{/sbin/nologin}}}
** Users with this shell are not allowed to log into the system.  Service accounts or banned users will be set to this shell.
* Available shells defined in {{File{/etc/shells}}}

!!! Locking accounts
* Replace the hash with a {{Monospaced{*}}} or {{Monospaced{!!}}} to lock the account.
** This is not enough on //some// systems.  Users may still be able to log in with SSH keys instead of passwords.
* Also change shell to {{Command{/sbin/nologin}}}
** This is a standard lockout shell.  A user must have a valid login shell in order to connect to a system
** The command {{Command{/sbin/nologin}}} just echos //This account is currently not available.// and terminates, thus disconnecting the user from the system.
* {{File{/var/run/nologin}}} or {{File{/etc/nologin}}}
** If this file exists, only root will be allowed to log into the system.   The contents of the file will be displayed to the user before they are disconnected.
*** This is helpful if a system needs to be closed for temporary maintenance.
* Check out service accounts in the password file - they should not have passwords or valid shells
** A service account with a password or valid shell is being abused by an attacker.

!!! New user:
* Use utilities ({{Command{useradd}}}, {{Command{userdel}}}, {{Command{usermod}}}) or edit the password files directly
* Create a home directory for the user
** Set home dir ownership and permissions so the new user can access it
* Set up environment (dot files)
** Copy the environment configuration files within {{File{/etc/skel/}}} (Linux) or {{File{/usr/share/skel/}}} (~FreeBSD) to the new user's home directory
*** Note:  All environment configuration file names begin with a dot.
*** Don't forget to change ownership on the environment files in the user's home directory too

!!! Remove or lock user
* Delete or comment lines in password files
** Will no longer be known to the system, but non-destructive
** Change password hash and change shell


!!! Authentication factors:

Multi-factor authentication (MFA):
* Passwords are not good enough anymore; they are easily stolen.
* Increase security by combining multiple authentication factors.
* More sites and organizations are now requiring MFA
** [[Linux Kernel Git Repositories Add 2-Factor Authentication|http://www.linux.com/news/featured-blogs/203-konstantin-ryabitsev/784544-linux-kernel-git-repositories-add-2-factor-authentication]]
** SUNY Poly recently switched from GMail to MS Outlook and added MFA for email account login

!!!! Methods of authentication:
* ''Something you know'':  passwords
** Should be of sufficient length and complexity to be hard to crack
** Minimum of 10-12 characters
** correct horse battery staple: http://xkcd.com/936/
** Should be unique across systems
*** [[Russian Hackers Amass Over a Billion Internet Passwords|http://www.nytimes.com/2014/08/06/technology/russian-gang-said-to-amass-more-than-a-billion-stolen-internet-credentials.html?_r=0]]
*** [[Stolen user data used to access account|http://community.namecheap.com/blog/2014/09/01/urgent-security-warning-may-affect-internet-users/]]
*** [[ebay|http://money.cnn.com/2014/05/21/technology/security/ebay-passwords/]] 
** Password Cards: http://www.passwordcard.org/en, http://www.evenprime.at/2012/04/password-security-with-password-cards/, etc
** Password vaults
*** [[Password Safe|https://www.pwsafe.org]]
*** [[KeePass|https://keepass.info/]]
** One-time passwords (OTPW)

* ''Something you have''
** [[yubikey|http://www.yubico.com/]]
** [[Google Titan Key|https://cloud.google.com/titan-security-key/]]
*** https://www.cnet.com/news/google-made-the-titan-key-to-toughen-up-your-online-security/
** [[DoD CAC card|http://www.cac.mil/common-access-card/]]
** [[Google 2 factor|https://www.google.com/landing/2step/]]
** [[RSA SecurID|http://www.emc.com/security/rsa-securid/rsa-securid-hardware-authenticators.htm]]

* ''Something you are''
** biometrics:  fingerprint, retina, voice print, facial, vein patterns

* ''Somewhere you are''
** Geofencing - Tie authentication to a particular location
*** Someone may only log in or may not log in from a specific geographic location
** ~GeoIP libraries
** pam_geoip


!!! SSH authentication & increasing security

!!!! ssh keys
* Access systems with keys instead of just passwords for added security
* 1.5 factor authentication:  Slightly better than just passwords
* Create keypairs with ssh-keygen
** Asymmetric keypairs are used for authentication.  You keep the private key secure and locked with a passphrase.  The public key is distributed to systems you have permission to access.
* Public keys are stored in ~/.ssh/authorized_keys
* Host public keys are stored in ~/.ssh/known_hosts
* ssh-agent & ssh-add : add your ssh keys to the agent to be used for connecting to multiple systems
* pssh - parallel ssh for connecting to multiple systems

!!!! sshd configuration
* Host keys
** Host key warning - A warning appears on new systems to verify the host key to ensure you're not the victim of a man-in-the-middle attack
* Require SSH keys to access the system (disable password authentication)
** A little more secure than just passwords.  An attacker cannot just capture a password, they also must capture the SSH key
* Deny root login - Don't allow users to log in directly as root.  Must log in first as a regular, unprivileged user and then escalate to root with either {{Command{su}}} or {{Command{sudo}}}
** No system should allow direct root login.  Turning this off is an excellent security first-step
** Our shell server sees about 50 attempts per day to log in as root.  Countermeasures identify and block these attackers.
** {{Command{grep 'sshd-root.*Found' /var/log/fail2ban.log | wc -l}}}
* Require group membership - Must be in a particular group to log in to the system via ssh


! Assignment

<<tiddler [[Lab 62 - VM Lockdown - Secure your VMs]]>>

----

<<tiddler [[Lab 63 - sudo]]>>

----

<<tiddler [[Lab 64 - Enable Two-Factor Authentication]]>>

----
/%
<<tiddler [[Lab 65 - SSH Intrusion]]>>
%/
----
! Wrapping up
!! Closing out the semester

!!! VM deletion

Our lab environment for this class will be decommissioned on ''Saturday, May 11''.  If there is anything you would like to complete or back up, please do so by then.  Please let me know if there's anything you need help saving or run short on time.

!!! Additional Resources
!!!! A lab environment similar to ours can easily be replicated from open source tools:
* [[Proxmox|https://www.proxmox.com/en/]]: The hypervisor our ~VMs are running on.  Works great on a spare server or PC that's kicking around.
* [[Naemon|https://www.naemon.org/]]: Infrastructure monitoring
* [[SaltStack|https://www.saltstack.com/]]: Infrastructure Management & Orchestration - I used this to easily run commands on all class ~VMs and maintain baseline configurations
* [[NameCheap|https://www.namecheap.com]]: Domain Registration - A simple, clean interface and free domain privacy.
* [[DigitalOcean|https://www.digitalocean.com/]]: Low cost cloud ~VMs - I use these for my personal infrastructure.  Good Linux ~VMs for $5 per month.
* [[Hetzner|https://www.hetzner.com/sb?country=us]]: Low-cost bare metal cloud servers.  This is the hosting provider for our current class lab environment.

!!! Class website mirror
The entire class website runs from a single HTML file.  A zip file containing the HTML file along with linked images, videos, and lab ~PDFs can be downloaded from https://www.ncs205.net/ncs205.zip
* Last updated 5/5/24 @ 19:45
/% - The link will be live once finals end - %/

/%!!! Feedback
I hope everyone enjoyed this class and got something useful from it.  The material I included is the highlights of what you'll need to be exposed to if you'll be using Linux in and beyond the NCS program.  If you have any feedback to offer, good or bad, please let me know.  I'm always looking for ways to improve the class for the next semester.  %/

! Final Exam

<<tiddler [[hack6 break-in]]>>

! Outstanding Labs

Any outstanding labs must be submitted by 6pm Saturday, May 4.  Please let me know if you expect a problem meeting that deadline for outstanding work and we'll discuss options.
! Material
!! Read:
* Chapter 3 in [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]]

!! Watch:
* [[Navigating the filesystem|https://www.youtube.com/watch?v=j6vKLJxAKfw]]


! Working on the command line

!! File types

In the Unix world, everything is represented by a file.  Regular files and directories are files, but so is hardware.  Each piece of hardware connected to the system is associated with a file within the directory {{File{/dev/}}}.
There are three main types of files we will be working with:
* Ordinary files.  These are regular files on the system, divided into:
** Plain text - Files you can read if you were to open them in a text editor
** Binary - Machine code, such as programs intended to be executed.
* Directories - The organizational unit for files within the filesystem
* Symbolic Links - These are pointers to another file somewhere on the system.  The symbolic link only contains the path to its target.

The [[Week 1, Part 2]] page has a more complete breakdown.  Be sure to review the information on that page again.

!! Navigating the filesystem:

!!! Directory paths
* Paths in Unix are very similar to the command line on Windows
* Here, the path separator is a forward slash - {{File{/}}}
* You can change directories with the {{Command{cd}}} command
** eg: {{Command{cd /opt/pub/ncs205/submit}}}
* List the contents of the directory with the {{Command{ls}}} command
* List the contents of the directory in long format with the {{Command{ls -l}}} command
** eg: {{Command{ls -l /opt/pub/ncs205/submit}}}
* Some directories of interest:
** {{File{/home/}}} - //Most// user home directories reside within this directory, but not all.
*** This is only a convention, not a requirement.  A user home directory can be anywhere on the system.
** {{File{/opt/pub/ncs205/submit/}}} - Lab/Homework assignments are uploaded to this directory
** {{File{/opt/pub/ncs205/data/}}} - Data files for labs are stored here
** {{File{/opt/pub/ncs205/returned/}}} - Graded and returned Lab/Homework assignments are moved to this directory for your retrieval.  
** {{File{/tmp/}}} - Temporary scratch space
** {{File{/bin/}}} & {{File{/usr/bin/}}} - Where most program files reside

!!! Listing files - {{Command{ls}}}
The {{Command{ls}}} command will list the contents of a directory.  Extra options can be used to alter the default behavior of the {{Command{ls}}} command:

!!!! Display in long list format:  {{Command{ls -l}}}
The {{Command{-l}}} option will display the contents of the directory in long listing format.  This displays additional metadata about a file, such as the file type, ownership information, size, and modification timestamp.
The first character on the line indicates the type of file:
* ''d'' for directory
* ''l'' for link
* ''-'' for regular file
{{{
[root@lab ~]# ls -l
total 152
drwx------   2 root root   173 Nov 23  2020 bin
-rw-------.  1 root root   903 Feb 18  2021 commands.txt
-rw-------   1 root root 57238 Aug 25  2020 packages.txt
-rw-------   1 root root   465 Sep  4 13:16 user2109.txt
}}}

!!!! Display hidden files:  {{Command{ls -a}}}
Files beginning with a dot are hidden and not normally displayed with the {{Command{ls}}} command.  Adding the {{Command{-a}}} option will allow them to appear:
{{{
[root@lab ~]# ls -al
total 152
dr-xr-x---.  6 root root  4096 Sep  4 13:16 .
dr-xr-xr-x. 17 root root   256 Aug 31 12:42 ..
-rw-r--r--.  1 root root   287 Dec 15  2020 .bash_profile
-rw-r--r--.  1 root root   176 Dec 28  2013 .bashrc
drwx------   2 root root   173 Nov 23  2020 bin
-rw-------.  1 root root   903 Feb 18  2021 commands.txt
-rw-r--r--.  1 root root   100 Dec 28  2013 .cshrc
-rw-------   1 root root 57238 Aug 25  2020 packages.txt
drwx------.  2 root root    71 Aug 28 17:38 .ssh
-rw-------   1 root root   465 Sep  4 13:16 user2109.txt
}}}

!!!! Sort by modification time:  {{Command{ls -t}}}
Adding the {{Command{ls -t}}} option will sort by modification time instead of by file name with the oldest files on the bottom.  The {{Command{ls -r}}} reverses the default sort to instead put the newest files on the bottom.

Combining all of these options to see a long listing sorted by reversed time (with the newest files on the bottom) is often handy:
{{{
[root@lab ~]# ls -lrt /opt/pub/ncs205/data
total 496
drwxr-xr-x. 3 root    10000     61 Feb  7  2018 lab7
drwxr-xr-x. 2 root    10000     74 May 27  2019 lab8
drwxr-xr-x. 2 root    10000  16384 May 31  2019 lab9
-rwxr-xr-x. 1 merantn users 471148 Jan 30  2020 whatami
drwxr-xr-x. 2 root    10000    136 Feb 13  2020 lab10
drwxr-xr-x  2 root    root      41 Feb 16  2020 lab15
d---------  2 root    root      18 May  4  2020 final
drwxr-xr-x  2 root    root     238 Oct 16  2020 filter_examples
drwxr-xr-x  2 root    users    207 Mar 13 12:31 lab21
}}}


!! Executing commands

!!! Structure of a command string:
* {{Command{command [options] [arguments]}}}
** options will begin with either a single dash (''&dash;'') or a double dash (''&dash;&dash;'')
** options and arguments may be optional or required depending on the command.
** Best practice is to enter the three components in that order
** The {{Monospaced{'' [ ] ''}}} in the usage example above indicates that component is optional.  The command is always required.  Some commands also require arguments.  The synopsis in the command's man page will indicate which components are required for different ways to use the command.

!! Viewing files
* Display the contents of a file: {{Command{cat //filename//}}}
* Display the contents of a file one page at a time: {{Command{less //filename//}}}
* Edit a text file: {{Command{nano //filename//}}} -or- {{Command{vi //filename//}}}

!! Working efficiently - some shortcuts
* View your previously executed commands with the {{Command{history}}} command
* Tab completion - Press the tab key to autocomplete commands or file paths
** Enter the first few letters of a command or file, press tab, and the shell will fill in the remaining letters (if it can)
* Up / Down arrows - search up and down through your command history to rerun a previously executed command
* Page Up / Page Down - Use these keys to search through your command history for the last commands which begin with a given string
** Type a few letters from the beginning of a previously executed command and press Page Up.  The shell will return to the last command you executed which began with those letters.

!! Other useful commands:
* {{Command{touch}}} - create an empty file
* {{Command{file}}} - examine a file to identify its type
* {{Command{strings}}} - display the plain text strings within a binary file.  Often useful for forensics and identifying what a binary is or does.


! Deliverables

!! Read Chapter 3 in [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]]
* Complete [[Lab 3|labs/lab3.pdf]] & [[Lab 4|labs/lab4.pdf]]
! Material
!! First complete this lab for review:
- Complete [[Lab 5|labs/lab5.pdf]]
!! Read:
* Chapter 4 in [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]]
** You can gloss over the parts about wildcards (pp 26-27) for now.  We'll come back to them later.
** Focus on becoming familiar with the available commands (also listed below).
!! Watch:
* Creating and Deleting files and directories: https://www.youtube.com/watch?v=91FhiTyEaCU
* Moving and copying files: https://www.youtube.com/watch?v=GKEGNdNIQrw


! Manipulating Files & Directories

Every operating system has a basic set of utilities to manipulate files and directories on the command line.  This reading assignment will introduce those commands in the Linux operating system.


!! Basic file manipulation commands

Rename and move files - {{Command{mv}}}
Copy files  - {{Command{cp}}}
* Recursive copy: {{Command{cp -R}}}
Create directories - {{Command{mkdir}}}
Delete files and directories  - {{Command{rm}}} & {{Command{rmdir}}}
* Difference between unlink & delete - Removing a file doesn't actually delete it from the system, it only marks the space it was occupying as available for reuse.
* This is more appropriately called "unlinking".  We're only removing the //link// between the filesystem and the data blocks.  The actual data will still reside on the disk and can be forensically recovered.
* The {{Command{srm}}} or {{Command{shred}}} commands will actually destroy a file by overwriting its data blocks before unlinking it.
** These commands are not installed by default on most systems
** Securely wiping a file is more resource intensive than simply unlinking it because random data must first be generated and then written to disk.
View the contents of files with {{Command{cat}}} and {{Command{less}}} (or {{Command{more}}}, an old version of {{Command{less}}})
** [[less quick reference]]
Edit text files:
* The standard Unix text editors are {{Command{nano}}} (Basic and easy to use) and {{Command{vi}}} (more powerful, but harder to get used to)
* {{Command{vi}}} comes installed on every Unix/Linux system.  The {{Command{nano}}} editor may need to be installed separately.
* {{Command{vi}}} has a learning curve, but is the professional text editor.  If you'll be doing this work in the future, learning {{Command{vi}}} is well worth the time investment.
** Some {{Command{vi}}} handouts: [[Command line summary handout|handouts/UnixCommandSummary.pdf]] (last page) & [[vi diagram handout|handouts/viDiagram.pdf]]

!! Other useful commands:
* {{Command{touch}}} - create an empty file
* {{Command{file}}} - examine a file to identify its type


! Deliverables

!! Review Material:
* Complete [[Lab 5|labs/lab5.pdf]]

!! New Material:
* Complete [[Lab 6|labs/lab6.pdf]] & [[Lab 7|labs/lab7.pdf]]
! Material

* Links:
** Read 
*** Chapter 3, pp 23 & 24
*** Chapter 4, pp 33 & 34
** Watch:
*** Links - https://www.youtube.com/watch?v=lW_V8oFxQgA

* File Globbing:
** Read 
*** Chapter 4, pp 25-27 (Wildcards)
*** Chapter 7, pp 72 & 73 (Brace Expansion)
** Watch: 
*** File Globbing: https://www.youtube.com/watch?v=QIysdjpiLcA
*** Brace Expansion: https://www.youtube.com/watch?v=LGzSnVYS2J4


! Links & File Globbing

!! Links

Links are a mechanism to connect two files together.  There are two different types of links within the Unix environment:  
# ''Hard links''
# ''Symbolic links''

The two differ in how the link connects to its target.  Those differences impact the situations in which each can be used and when one is more appropriate than the other.

Using the following directory listing as an example:

{{{
[merantn@shell dict]$ pwd
/usr/share/dict

[merantn@shell dict]$ ls -l
total 9680
-rw-r--r--. 2 root root 4953680 Jun 10  2014 linux.words
-rw-r--r--. 2 root root 4953680 Jun 10  2014 wordlist
lrwxrwxrwx. 1 root root      11 Feb  4 21:07 words -> linux.words
}}}

We can identify the file named {{File{words}}} as a ''symbolic link'' due to the ''{{Monospaced{l}}}'' character at the beginning of the line and the arrow pointing to its target.  A symbolic link is a special type of file that only contains the path to the file it is pointing to.  

''Hard links'' are directory entries which point to the same inode.  An inode is a filesystem data structure which contains information about the file and where its blocks can be found on the underlying storage medium.  Thus, hard links point directly to the same place on the disk.  We can tell that the files {{File{linux.words}}} and {{File{wordlist}}} are hard links because of the number ''2'' in the second column (from the {{Command{ls -l}}} command output above; third column below).  This is the link count.  It will increase as more hard links are created.  A file isn't truly deleted until its link count reaches zero.

{{{
total 9680
19220022 -rw-r--r--. 2 root root 4953680 Oct 14  2019 linux.words
19220022 -rw-r--r--. 2 root root 4953680 Oct 14  2019 wordlist
19220023 lrwxrwxrwx. 1 root root      11 Oct 14  2019 words -> linux.words
}}}

In the above output, adding the ''{{Monospaced{-i}}}'' flag to the {{Command{ls}}} command shows the inode number for the file.  We can see the files {{File{linux.words}}} and {{File{wordlist}}} are both hard links pointing to the same place on the disk because they both are pointing to the same inode number.  Identifying hard links by inode number will be more difficult if the links are not in the same directory.

This graphic may help visualize the relationship between the different link types and their targets.
[img[img/links.png]]

!!! Link commands:

* Create links with the {{Command{ln}}} command:
** Create a hard link:  {{Command{ ln //file// //link//}}}
** Add the {{Command{ -s }}} option to create a symbolic link:  {{Command{ ln -s //file// //link//}}}
* In each case, {{Monospaced{ //file// }}} is the target file that currently exists and {{Monospaced{ //link// }}} is the name of the new link you are creating.


The textbook pages listed at the top will contain more information about these two link types.


!! File globbing & wildcards

So far, when working with files we've specified one filename at a time on the command line. Other shell metacharacters exist to identify files by patterns in their filenames and work with them as a group. Suppose we want to move all files that end in ''.jpg'' to a particular location, or delete all files that contain the string ''temp'' in their filename. If there's thousands of them, it's going to be very tedious to have to list each of the files individually. Or, we can instead use special file wildcard metacharacters to concisely identify these groups of files by common characteristics. This is referred to as ''filename substitution'' or ''file globbing''.


!! Filename substitution

Metacharacters associated with file names: &nbsp; {{Monospaced{''~ * ? [ ] [^ ] { }''}}}

* {{Monospaced{''*''}}} = match any sequence of 0 or more characters
* {{Monospaced{''?''}}} = match any single character.
** It's important to note the ''?'' is a mandatory position which must be filled. It's not optional like the ''{{Monospaced{''*''}}}'' is. So if you type {{Command{ls /bin/d??}}}, you'll see a list of all files in {{File{/bin/}}} which begin with a ''d'' and are exactly three letters in length. You will not see the files which are shorter than three characters, such as the {{Command{df}}} command or longer than three characters such as the {{Command{diff}}} command.  Both ''?'' must contain a character.
* {{Monospaced{''[ ]''}}} - match any of the enclosed characters in the set (eg: ''[abcd]''), or match a range (eg: ''[a-z] [~A-Z] [0-9] [e-q]'')
** The {{Monospaced{''[ ]''}}} brackets are similar to the ''?'' in that they specify a single, mandatory character. Where the ''?'' wildcard can represent any character, the brackets allow us to be a little more specific with what that single character may be.
** The {{Monospaced{''-''}}} within the {{Monospaced{''[ ]''}}} specifies the range of characters based on its position in the [[ascii chart|img/ascii-chart.gif]].  For example, {{Monospaced{''[4-6]''}}} or {{Monospaced{''[;-?]''}}} to match the characters {{Monospaced{''; < = > ?''}}} (ascii 59 to ascii 63).
*** Ranges and lists of characters can be combined.  The globbing pattern {{Monospaced{''[ac5-8()]''}}} will match the letters {{Monospaced{''a''}}} and {{Monospaced{''c''}}}, the numbers {{Monospaced{''5''}}} through {{Monospaced{''8''}}}, and the two parentheses.
** {{Monospaced{''[^ ]''}}} - match any character //not// enclosed in the set or range (eg: ''[^abcd]'' or ''[^a-z]'').  The notation ''[! ]'' is sometimes used but not universally recognized.  Use ''[^ ]'' instead.  The labs will all use ''[^ ]''.
* {{Monospaced{''{ }''}}} - Brace Expansion.  Expand comma separated strings to create multiple text strings from a pattern. Example: {{Command{mkdir -p {one,two,three}/examples}}} will create the directories {{File{one/examples}}}, {{File{two/examples}}}, and {{File{three/examples}}}.

{{Note{''Note:'' Negation should only be used when it is the best possible method for solving the problem, not as a way to be lazy.  If the question asks to list a particular set of files, try to find a way to target just those files.  Negation is ideal when the question includes a negation term, such as the wording //except// or //do not//.  When negation is abused, often files are matched which did not intend to be.  }}}
{{Warning{''Warning:'' Try to be as specific as possible when you are using wildcards.  It's best practice to type out the static text and only use wildcards for the dynamic part of what you are trying to match.  For example, if I am trying to match the files {{File{data1.txt}}}, {{File{data2.txt}}}, {{File{data3.txt}}}, and {{File{data4.txt}}}, the best file globbing pattern would be {{Command{data[1-4].txt}}}.  It is as specific as possible and includes the static portions of the filename.  Using {{Command{data?.txt}}} would inadvertently match {{File{data5.txt}}} and {{Command{*[1-4].txt}}} could match something else entirely.  Even if those files are not currently in the directory, they might be later.  Don't be lazy with your file globbing patterns!}}}

!!! Examples  - Display all files whose names:

Begin with the letter f: {{Command{ls f*}}}
&nbsp;&nbsp;&nbsp;(read as: list files which begin with an ''f'' followed by ''0 or more characters'')
Contain a number: {{Command{ls *[0-9]*}}}
&nbsp;&nbsp;&nbsp;(read as: list all files which may begin with ''0 or more characters'', followed by ''any number'', and end with ''0 or more characters'')
begin with an uppercase letter: {{Command{ls [~A-Z]*}}}
begin with the letter a, b, or c: {{Command{ls [abc]*}}}
begin with the letter a, b, or c and is exactly two characters in length: {{Command{ls [abc]?}}}
do not begin with the letter a, b, or c: {{Command{ls [^abc]*}}}
end with a number from 2 to 9 or a letter from w to z: {{Command{ls *[2-9w-z]}}}
are exactly two characters long and begin with a lowercase letter: {{Command{ls [a-z]?}}}
begin with string one, end with string three, and contain string two somewhere in between: {{Command{ls one*two*three}}}


{{Warning{''Warning:'' Working on the command line requires an eye for detail. We're starting to get to the point where that detail really matters. There's a huge difference between the commands {{Command{rm *lab6*}}} and {{Command{rm * lab6*}}}. One stray space and you're going to be in for some missing labs. Take a second look at your commands before executing them and be very deliberate with what you're running. Remember - Working on the command line is precise. Every character matters and we must have an eye for detail!}}}


!! Substitutions

Through use of shell metacharacters, substitutions are transformations performed by the shell on command line input prior to executing a command string. File globbing is one of the 5 types of shell substitutions.

It's important to understand the order of operations here. In the math formula 5 + 6 * 7, our calculations are not automatically performed left to right. There is a set order of operations that calls for the multiplication to be performed first. The same idea applies to entering command line input. First, all substitutions are performed by the shell, then your command string is executed. 

Consider the command {{Command{ls *.jpg}}}

The shell recognizes that we're performing a substitution (eg: {{File{*.jpg}}}) and replaces {{File{*.jpg}}} in the command string with a list of all files that match the pattern.
Next, the {{Command{ls}}} command is executed with the list of files as arguments

A great way to preview the result of any substitutions is with the {{Command{echo}}} command. The {{Command{echo}}} command repeats back to the screen whatever you give it as an argument. For example:

{{{
[merantn@shell ~]$ echo hello ncs205
hello ncs205
[merantn@shell ~]$ cd /opt/pub/ncs205/data/lab9
[merantn@shell lab9]$ echo rm IMG_126?.jpg
rm IMG_1260.jpg IMG_1261.jpg IMG_1262.jpg IMG_1263.jpg IMG_1264.jpg IMG_1265.jpg IMG_1266.jpg IMG_1267.jpg IMG_1268.jpg IMG_1269.jpg
}}}

So if I have a complex or risky substitution, I may want to prefix the command string with the {{Command{echo}}} command to preview it before it's executed:
eg: Change to {{File{/opt/pub/ncs205/submit/}}} and run: {{Command{echo ls *lab[1-3]*}}} to see what substitution is being performed and the actual command string about to be executed. Don't forget to prefix it with {{Command{echo}}}!

These file globbing substitution examples are pretty tame, but this trick with the {{Command{echo}}} command will come in very handy later on when we get to more complicated substitutions.


! Assignment

* Links:
** Read 
*** Chapter 3, pp 23 & 24
*** Chapter 4, pp 33 & 34
** Watch:
*** Links - https://www.youtube.com/watch?v=lW_V8oFxQgA
** Complete: [[Lab 8|labs/lab8.pdf]]

* File Globbing:
** Read:
*** Chapter 4, pp 25-27 (Wildcards)
** Watch: 
*** File Globbing: https://www.youtube.com/watch?v=QIysdjpiLcA
*** Brace Expansion: https://www.youtube.com/watch?v=LGzSnVYS2J4
** Complete:  [[Lab 9|labs/lab9.pdf]] & [[Lab 10|labs/lab10.pdf]]
*** ''Note:'' Lab 10 is due Saturday.
! Improving soft skills

Our first few labs brought up some soft-skill gaps.  Advancements here will be important for this course and your future careers:
# Read the directions thoroughly
# Ensure you're meeting all requirements
# Be thorough in your writing
# Consider usability - presentation matters
# Use proper terms
# Test your theories

!! 1. Read the directions thoroughly

Lab 7 contained this phrase in bold within the directions at the top of the page:  ''Commands executed must function from anywhere on the system.''

About half the class ignored that and lost points from several of the questions.

Question 1 was a good example:

<<<
1. //Create an empty file named ''source'' @@in your home directory@@ using a single command.//
<<<
About half the class responded with a solution similar to this:  {{Command{touch source}}}

That command gets the job done, but //only// if your current working directory is your home directory.  What if you're somewhere else in the filesystem?  The full path to the file we wish to create must be included to indicate exactly where on the system it should be created.  Either the solution {{Command{touch /home/ncs205///username///source}}} or {{Command{touch ~/source}}} will properly include the path to your home directory and provide a command which will work from __anywhere on the system__.


!! 2. Ensure you're meeting all requirements

The directions for groups of questions or instructions within the questions themselves will have small details in their requirements which will need to be addressed.  Some questions will ask for both the command and output.  Often, students overlook the need for output and only supply the command.

It's always wise to read the directions for the lab or instructions for a question, add your responses, then re-read the details to ensure your responses match what is requested.

Lab 3, Question 1 was a good example:

<<<
3. //A command string may be composed of three different components: The command, options, and arguments. Explain the purpose of each component @@and what must separate them@@. Provide an example containing all three components to further illustrate your point, preferably not one already demonstrated in the book.//
<<<

Quite a few forgot to mention that whitespace must separate our command, options, and arguments and lost points.  It might be helpful to use your PDF reader's highlight function to highlight these requirements as you read the question to help ensure you're addressing all of them.


!! 3. Be thorough in your writing

We're not in the classroom where I can easily ask you to clarify your responses.  In writing, you must be thorough so the reader understands your message.  This is important now for grading to convey that you fully understand what's going on and will be important later when it comes time for you to create documentation or explain things to colleagues.  I encounter far too much poor "professional" documentation at my day job which is either vague or omits critical details.  Lab 5, questions 8 and 9 highlighted this.  Too many points were lost unnecessarily due to incomplete explanations. 

I provided an example for the first command in question 8:
<<<
8.  //Summarize the actions performed by the following commands (don’t just copy the output)://
<<<
|ls&nbsp;&nbsp;&nbsp;|&nbsp;&nbsp;List the contents of the current directory&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;|

This explanation concisely lists the action which is taken (list the contents) and the object which is acted upon (the current directory).  

For the second command, {{Command{ls -a}}}, a common response I received was //list hidden files//.  This response omits two critical details:  Are //only// hidden files to be listed?  Which hidden files are we listing?  A thorough response would be something like: //list all files, including hidden files, in the current working directory//.  Here we're concisely explaining all of the components of this command:  What the command does (list all files), the option provided (including hidden files), and the target (in the current working directory).

Even worse, for the command {{Command{ls /}}}, I received several responses which were simply //root directory//.  What about it?  What's the action that's being taken?  Here, the action taken upon the ''@@root of the filesystem@@'' is omitted completely.  A response for this command should be something like //list the contents of the ''@@root of the filesystem@@''//.  We're explaining both the command and the target.

Notice the highlighted text above and the output of the command {{Command{ls /}}}:
{{{
[merantn@lab ~]$ ls /
bin   dev      etc   lib    media  opt   root  sbin  sys  usr
boot  entropy  home  lib64  mnt    proc  run   srv   tmp  var
}}}
There is an entry in this output named {{File{//root//}}}.  Using //root directory// in the response to that question is ambiguous.  Does //root directory// mean {{File{/}}} or {{File{/root/}}}?  Referring to {{File{/}}} as //the root of the filesystem// helps eliminate that problem.


Another example:  For the command {{Command{ls .}}}, I received the response //lists the current directory//.  The command {{Command{pwd}}} will display the current directory.  By default, the {{Command{ls}}} command lists the //contents of// its target.  There's a big difference between the two.

Yet another example:  For #9, I occasionally receive the terse response //Changes to tmp directory// as a response to the commands {{Command{cd /tmp/}}} and {{Command{cd /var/tmp/}}}.  Those are two different paths.  How can the answer be the same for both of them?  Be specific - you're provided with an absolute path in the question, so it might be a good idea to use the same absolute path in the responses.


We'll be in this situation throughout the semester.  Be sure your responses are thorough and do not omit the critical details.  Even if you never touch the Linux command line again, improving your writing will be a universal skill that will serve you well later.


!! 4. Consider usability - presentation matters

Usability and good presentation should be part of everything you produce.  Lab 3, #6 provides a good example:
<<<
6. //The Unix manual is an excellent system resource to learn more about commands on the system or as a quick reference for their usage. Use what you learned in the first three chapters to identify four commands on the shell server which were not already discussed in the book or class website. Research these commands and write a summary of each below. Include the name of the command, a summary of its function, and an example of its usage.//
<<<

Of these two options, one is clearly presented better and easier to read than the other.  Try to avoid large blocks of text when possible; space out separate items to improve readability.

{{Monospaced{
The 'grep' command searches for a specified pattern in files or input and outputs the lines that contain the pattern.  grep "error" server_logs.txt searches for the word "error" in the file "server_logs.txt".  chmod: This command changes the file mode bits (permissions) of each given file, directory, or symbolic link. chmod 755 script.sh sets the permissions of "script.sh" to "755" (read, write, and execute for owner; read and execute for group and others).  The 'tail' command outputs the last part of files. It's commonly used to view the newest entries in log files.  tail -n 20 server_logs.txt displays the last 20 lines of the file "server_logs.txt".  du - This command is used to estimate file and directory space usage. du -sh /home/user/Documents displays the total size of the "Documents" directory in a human-readable format.
}}}

{{Monospaced{
1. grep: The 'grep' command searches for a specified pattern in files or input and outputs the lines that contain the pattern.
&nbsp; - Example: grep "error" server_logs.txt searches for the word "error" in the file "server_logs.txt" and displays those lines to the screen

2. chmod: This command changes the file mode bits (permissions) of each given file, directory, or symbolic link.
&nbsp;  - Example: chmod 755 script.sh sets the permissions of the file "script.sh" to "755" (read, write, and execute for owner; read and execute for group and others).

3. tail: The 'tail' command outputs the last part of files. It's commonly used to view the newest entries in log files.
&nbsp;  - Example: tail -n 20 server_logs.txt displays the last 20 lines of the file "server_logs.txt" to the screen.

4. du (Disk Usage): This command is used to estimate file and directory space usage.
&nbsp;  - Example: du -sh /home/user/Documents displays the total size of the "Documents" directory in a human-readable format.
}}}

!! 5. Use proper terms

Lab 5 questions 8 and 9 asked you to explain what the commands {{Command{ls ..}}} and {{Command{cd ..}}} will do.  I received a lot of responses that contained the phrase "//previous directory//" to refer to the {{File{..}}} portion of that command string.  //Previous directory// is ambiguous.  To me, that refers to the last directory you were in.  The directory {{File{..}}} is a special directory that refers to the ''//parent//'' of a directory, so the command {{Command{ls ..}}} will //display the contents of the parent of the current working directory//.  The command  {{Command{cd ..}}} will change you to the //parent of your current working directory// whereas {{Command{cd -}}} will //return you to the previous directory you were in//.


!! 6. Test your theories

Lab 6, question 7 was a good example of this problem:

<<<
//7. Explain each of the three arguments and the result of executing the following command string:  {{Command{mv one two three}}}.//
<<<

I received some pretty wild responses to this one.  The two most common incorrect answers are:

* //move file one to file two and file two to file three// 
* //move files one and two to directory three.  __Directory three will be created if it does not exist.__//

We have a lab environment available to us for practicing the material and testing your solutions. If these answers were tested, it would be very obvious they are incorrect.  Submitting untested solutions will especially be a problem later in the semester when we get to more complicated material.  Don't be lazy and just guess.  I tend to grade far more harshly when I encounter such obviously incorrect responses.


! Material

This is going to be a light section.  Please take the time to review any past work and ensure you're caught up.  If you're having trouble or are unsure of anything, please take advantage of the discussion boards or reach out to schedule a chat.

!! Home directories

I'm adding another lab (Lab 11) to give a little more practice working with home directories and the shortcut metacharacter involved with them.  The bottom of page 70 in [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]] has the details.


!! Working with command documentation

Read Chapter 5, pp 44-47 on the {{Command{help}}} and {{Command{man}}} commands.  Pay particular attention to the part about notation and be sure to know how to interpret the documented usage of these commands:
{{{
cd [-L|[-P[-e]]] [dir]

vs. 

mkdir [OPTION]... DIRECTORY...
}}}


! Assignment

* Home directories:
** Complete:  [[Lab 11|labs/lab11.pdf]]
* Documentation
** Read Chapter 5, pp 44-47 in [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]] to learn about the {{Command{help}}} and {{Command{man}}} commands.
! Material

* File Permissions:
** Read [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]] Chapter 9, pp 90 - 100 (Stop at //Some Special Permissions//)
** Watch: 
*** File Permissions: https://www.youtube.com/watch?v=8SkN7UofOww
*** Umask: https://www.youtube.com/watch?v=xz2_1UEweKM

! Notes

!! File Permissions

The Unix operating system has multiple levels of securing access to resources. We can restrict who can access the system through userids and login credentials, we can limit who can become the superuser and act as the administrator of the system, we can control who can access certain directories on the system, and we can control access to files. The first two are items for an administrator to configure, but the latter two regular users can control for files that they own. Being able to restrict access to certain files is a critical function of a multi-user system. For example, we restrict access to the lab assignments everyone is uploading so no one else peeks at your work. Certain sensitive system files are restricted to keep the system more secure.

Hopefully by now we're comfortable navigating the filesystem and identifying files by name, both individually and in groups. Next I'd like to examine how we can manipulate the file's permissions.

Permissions can be set based on three different tiers:

* User - the owner of the file
* Group - a group that has access to the file
* Others - everyone else on the system

And three different permissions can be set on each file

* Read - The ability to read a file or list the contents of a directory
* Write - The ability to modify content of a file or create files in a directory
* Execute - The ability to run a program or access a directory

Chapter 9 in the The Linux Command Line will discuss permissions in detail.

This youtube video is a good permission overview:  [[File Permissions|https://www.youtube.com/watch?v=8SkN7UofOww]]


!!! File & Directory Permissions

The following tables and graphics can serve as a quick reference:

!! File & Directory Permissions
|!Type|!File|!Directory|
| read (4) | read contents | List directory |
| write (2) | change / delete file | Add files |
| execute (1) | run executable | cd into |

!!!! chmod

The {{Command{chmod}}} command can be used to change permissions for existing files.
* using octal codes
** Read (4), Write (2), and Execute (1)
** Three positions:  user, group, and others
* using symbolic codes
** who:
*** u - user
*** g - group
*** o - others
*** a = all positions
** operator:
*** = explicitly set
*** + add permission
*** - remove permission
** permission:
*** r = read
*** w = write
*** x = execute

{{Note{''Note:'' Use symbolic abbreviations when making changes to permissions without consideration to what is already set, eg: when adding or removing permissions. The use of octal codes requires all permissions be completely reset - a user cannot set, add, or remove individual permission settings.

For example, suppose I only want to __add__ write permissions for the group. Without knowing what the permissions currently are, I have to use symbolic notation to modify the permissions on the file. In this case with {{Command{chmod g+w //file//}}}

If the lab question asks you to ''set'' permissions, use __octal codes__. If it asks you to ''add or remove'', use __symbolic__ abbreviations.
}}}

<html><center><img src="img/chmod1.png" alt=""><BR><BR><HR width="75%"><img src="img/chmod2.png" alt=""></center></html>


!!! umask

The {{Command{umask}}} command can be used to establish default permissions for all newly created files.

* umask - user mask - which permissions to restrict. (mask = remove)
* start with full permissions 777
* The umask value is which bits to remove.
* The execute bit (1) will automatically be subtracted from all positions for regular files
* Making a new regular text file executable must be a manual task

A mask refers to bits to be removed. If we do not want newly created files to have write permissions for the group or others, we need to mask 2 from the group and others positions, resulting in a umask of 22.

Examples:

A umask value of 22 will set default permission for new files to 644 (777 - 22 - 111) and directories to 755 (777 - 22)
A umask value of 77 will set default permission for new files to 600 (777 - 77 - 111) and directories to 700 (777 - 77)
''Note:'' Newly created files are not granted execute automatically despite the umask value.


!! Misc:

The book creates empty files for its examples with {{Command{> foo.txt}}}.  This has the same effect as executing {{Command{touch foo.txt}}} when the file does not already exist.  (Note one difference: redirection will truncate an existing file to zero bytes, while {{Command{touch}}} only updates its timestamp.)


! Assignment

* File Permissions:
** Read [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]] Chapter 9, pp 90 - 100 (Stop at //Some Special Permissions//)
** Watch: File Permissions: https://www.youtube.com/watch?v=8SkN7UofOww
** Complete:  [[Lab 12|labs/lab12.pdf]] & [[Lab 13|labs/lab13.pdf]]
! Material

* Read Chapter 6 in [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]].
** Watch Linux Sysadmin Basics 04 -- Shell Features -- Pipes and Redirection - https://www.youtube.com/watch?v=-Z5tCri-QlI

* Read Chapter 20 in [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]].  Stop with {{Command{tr}}} at the bottom of page 299.
** Focus on the filters listed below
** We won't be working with paste, join, patch or aspell.  You can skip over these commands if you'd like.
** Save tr and sed for later.  They're too complex for right now.

* Watch - https://www.youtube.com/watch?v=tD8qzBmi-t0
** This video is a good command overview.  There's more in here than we need now, but we'll be using all of these commands at some point this semester
** He moves pretty fast, so be prepared to pause to catch the commands or slow down the video speed

Please make use of the discussion boards if you run into any trouble with these commands, especially next week when we need to start combining them in more complex ways.

Most shell metacharacters (the symbols on your keyboard)  have a special meaning.  Compiling a list of them with an explanation and example usage in this [[Shell Metacharacter Table|handouts/ShellMetacharacterTable.pdf]] as they are introduced might be helpful.  


! Notes

A lot of the power of the Unix environment comes from single purpose commands. The filter commands we are about to introduce are great examples. By combining these single-purpose commands we can build flexible and customized solutions to solve a wide range of problems.

By default, output from a command such as our text filters is displayed to the screen. By redirecting where that output is sent, we can chain commands together to creatively solve more complex problems.

Redirecting standard I/O is how we move data between filters and files. The following diagram illustrates our options:
[>img[img/stdout.png]]

This video explains the basics: https://www.youtube.com/watch?v=-Z5tCri-QlI

!! Standard input and standard output:
* Every filter should be able to accept input from any source and write output to any target
* Input can come from the keyboard, from another file, or from the output of another program
* Output can be displayed to the screen, can be saved to a file, or sent as input to another program
* This gives us great flexibility when combined with the simple approach to our tools

The standard source, Standard Input, is commonly abbreviated as STDIN.  The two output destinations, Standard Output and Standard Error, are commonly abbreviated as STDOUT and STDERR, respectively.  Collectively, all three are abbreviated as STDIO.

{{Warning{''Warning:'' Not every utility will accept input on STDIN, not every utility will output to STDOUT!  It is important to keep this in mind.  Generally, most system utilities such as {{Command{ls}}}, {{Command{mkdir}}}, and {{Command{cp}}} do not accept input on STDIN.  Only some of them will send output to STDOUT.  All tools which manipulate text (text filters) will utilize both STDIN and STDOUT.}}}


!! Redirection - moving input or output between a command and a file

We have new shell metacharacters to assist with the management of input and output:
* > : Redirect output - Send a command's output to a file, overwriting existing contents
** {{Command{ users > userlist }}}
** {{Command{ who > loggedin }}}
* {{Monospaced{>>}}} : Redirect output - Send a command's output to a file, appending to existing data
** {{Command{ who >> loggedin }}}
** {{Command{ (date ; who) >> loggedin }}}
* {{Monospaced{ < }}} : Redirect input - Take a command's input from a file
** {{Command{ tr  ' '  ,  <  userlist }}}
* Disable output by redirecting it to {{File{/dev/null}}}, the unix garbage can
** {{Command{ make > /dev/null }}}


!! Standard Error (STDERR)

Some commands use a separate data stream, STDERR, for displaying any error or diagnostic output.  Having this extra output on a separate stream allows us to handle it differently.  We can send STDOUT to one destination and STDERR to another. 

We can prefix our redirection symbols (''>'' or ''>>'') with a ''2'' (the STDERR file descriptor) to send STDERR to a different destination.  To send STDERR down a pipeline, it must first be merged into STDOUT with ''2>&1'' before the ''|'' symbol.

For example, notice how the error message from the second command is discarded:

{{{
[root@shell ncs205]# id username
id: username: no such user

[root@shell ncs205]# id username 2> /dev/null

[root@shell ncs205]# id merantn 2> /dev/null
uid=7289(merantn) gid=100(users) groups=100(users),205(ncs205)

[root@shell ncs205]# id merantn 2> /dev/null 1> /dev/null
}}}


!! Command Execution
* Chaining commands (Pipelines):
** Workflows can be completed as a pipeline of simple tools
** Glue multiple commands together to perform complex tasks out of simple tools
** Send STDOUT of one command as STDIN to another with the  |  (pipe)  symbol
** First command must be able to send output to STDOUT and second command must be able to read input from STDIN
** Examples:
*** {{Command{ who | sort | less }}}
*** {{Command{ who | wc -l }}}
*** {{Command{ last | cut -d ' ' -f 1 | sort | uniq }}}
*** ''Does not work!  See yellow box above.'':  {{Command{ ls * | rm }}}
**** File manipulation utilities like rm do not work with STDIN and STDOUT
* Send to STDOUT and save to a file with the {{Command{tee}}} command
** {{Command{ df | grep mapper| tee fs }}}
** {{Command{ df | tee fs | grep mapper }}}
*Sequenced commands:  {{Command{ command1 ; command 2 }}}
**No direct relationship between the commands
**Do not share input or output.  Simply combined together on the same line
** {{Command{ echo Today is `date` > Feb ; cal >> Feb }}}
*Grouped commands: {{Command{ (command1 ; command2) }}}
** {{Command{ (echo Today is `date` ; cal ) > Feb }}}
** Run in a sub-shell - Launch commands in a new shell (any new settings or shell variables are not sent back to parent shell)
*** Observe the current directory after running this command sequence: {{Command{ ( cd / ; ls ) ; pwd }}}


!! Chaining Commands with text filters:

Build flexible and customized solutions to solve wide range of problems.
Unix filter tools are very useful for manipulating data
Filter definition:  any command that takes input one line at a time from STDIN, manipulates the input, and sends the result to STDOUT
To most effectively solve a problem, you must know the available tools.  Know the commands and be familiar with the options available.

When working with the filters to solve problems:
* Break the problem down into small parts
* Choose your tools
* Experiment
* Perfect and simplify your solution


!!! Core Filters:
* {{Command{cat}}} - concatenate one or multiple files
** {{Monospaced{-n}}} option - numbered lines
** create text files by redirecting output to a file
* {{Command{head}}} - display lines from the beginning of a file
** {{Monospaced{-n}}} - display first //n// lines
* {{Command{tail}}} - display lines from the end of a file
** {{Monospaced{-n}}} - display last //n// lines
** {{Monospaced{+n}}} - Begin display at line //n// 
** {{Monospaced{-f}}}  - do not stop at eof, continue displaying new lines.
* {{Command{grep}}} - pattern matching : //pattern// //files(s)//
** {{Command{grep //pattern// file1 file2 file3}}}
*** Example: {{Command{grep dog //file(s)//}}}
*** {{Command{w | grep ^d}}}
** {{Command{//command1// | grep //pattern//}}}
** Anchors: 
*** {{Monospaced{^}}} = begins with
*** {{Monospaced{$}}} = ends with
**Useful options:
*** {{Monospaced{-v}}} : Invert the match
*** {{Monospaced{-i}}} : Case insensitive
*** {{Monospaced{-l}}} : list only file names
*** {{Monospaced{-H}}} : list file name with matched pattern
**Examples:
*** {{Command{grep -v '^$' /etc/printcap}}}
*** {{Command{ls -l | grep ^d}}}
*** {{Command{grep init /etc/rc*}}}
*** {{Command{cp `grep -l init /etc/rc*` scripts/}}}
*** words containing the string //book//
*** lines containing dog at the end of the line
* {{Command{sort}}} - sort lines of text files
**sort passwd file
**Options:  
*** {{Monospaced{-n}}} : Numeric
*** {{Monospaced{-r}}} : Reverse
*** {{Monospaced{-k}}} : sort on field #
*** {{Monospaced{-t}}} : Specify delimiter (default whitespace)
** Examples:
*** {{Command{sort  /etc/passwd}}}
*** {{Command{sort -t : -k 5 /etc/passwd}}}
*** {{Command{sort -n -t : -k 3 /etc/passwd}}}
* {{Command{uniq}}}  - filter out repeated lines in a file
**Must be sorted before showing unique values
**{{Monospaced{-c}}} : Count number of matches
* {{Command{wc}}} - word, line, character, and byte count
** {{Monospaced{-w}}} = word count
** {{Monospaced{-l}}} = line count
* {{Command{cut}}} - cut out selected portions of each line of a file, either range of characters or delimited columns
** Two main usage options: 
*** By delimited columns:
**** {{Monospaced{-d}}} : Specifies the delimiter (defaults to tab)
**** {{Monospaced{-f}}} : Specifies the field(s)
*** Range of characters:
**** {{Monospaced{-c}}} : Extract character ranges
** Examples: 
*** Extract fields 2 through 3 from file data.txt, delimited by a semi-colon: {{Command{cut -d ';' -f 2-3 data.txt}}}
*** Extract characters 65 through end of line from the ~Fail2Ban log:  {{Command{cut -c 65- fail2ban.log}}}
* {{Command{strings}}} - Searching for strings in binary files
*Compare files
** {{Command{cmp}}} - compare two files
** {{Command{diff}}} - compare files line by line
** {{Command{comm}}} - select or reject lines common to two files

{{Note{''Note'': Abuse of the {{Command{cat}}} command, like demonstrated in [[Useless use of cat|https://en.wikipedia.org/wiki/Cat_(Unix)#Useless_use_of_cat]], is a sloppy practice that will cost points.  Commands which accept filenames as arguments do not normally need the {{Command{cat}}} command to provide input via a pipeline.

Consider these two command examples:
* {{Command{grep pattern file}}}
* {{Command{cat file | grep pattern}}}

The {{Command{cat}}} command provides no value to the second example and should not be used.  The first example is the proper way to accomplish this task.
 }}}


! Assignment

*  Complete [[Lab 14|labs/lab14.pdf]], [[Lab 15|labs/lab15.pdf]], and [[Lab 16|labs/lab16.pdf]]
** ''Note:'' Lab 16 is not due until next Wednesday.
! Review

Complete [[Lab 17|labs/lab17.pdf]] for some additional practice with the basic filters.


! Material

!! Read:
* Read Chapter 20, pp 299-307 in [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]].  
** Stop after the grey box at the top of 307.
** The book uses advanced regular expressions for some of its sed examples.  Don't worry about understanding what the regular expressions do.  Stick with the simple examples like the ones on pages 301 to top of 303.
*** The first regular expression is at the top of page 305 and looks like this:  @@s/\([0-9]\{2\}\)\/\([0-9]\{2\}\)\/\([0-9]\{4\}\)$/\3-\1-\2/@@

!! Watch:
* {{Command{tr}}}:
** How to Use tr, sed, and aspell: Linux Terminal 201 - https://www.youtube.com/watch?v=F7Brrn-L1Zg
** Mostly for tr, but there's some talk about sed too
** Hak5 has a lot of great content.  Check out their other videos.
* {{Command{awk}}}: 
** Learning awk - https://www.youtube.com/watch?v=9YOZmI-zWok
** This video goes into advanced usage at the 11:30 mark that we won't be covering in this class.  You can stop at that point if you'd like.
* {{Command{sed}}}:
** SED Tutorial Basic Substitution - https://www.youtube.com/watch?v=32waL1Z9XK0&list=PLcUid3OP_4OW-rwv_mBHzx9MmE5TxvvcQ&index=1
** SED Substitute Beginning and End of Line: https://www.youtube.com/watch?v=8T5azKqYAjc&list=PLcUid3OP_4OW-rwv_mBHzx9MmE5TxvvcQ&index=2
** SED Remove Lines When Match is Found: https://www.youtube.com/watch?v=37r5Ykdnlkk&list=PLcUid3OP_4OW-rwv_mBHzx9MmE5TxvvcQ&index=13
** The entire sed series has great content, but those three are the highlights


! Notes

!! More complex filters

The {{Command{tr}}}, {{Command{awk}}}, and {{Command{sed}}} commands are a little more complex than the others we've introduced, but all three are important tools to have in your toolbox.  {{Command{awk}}} is easier to work with than cut for extracting columns of text.  {{Command{sed}}} is especially useful for search and replace operations and extracting particular rows of text from a file.

* {{Command{tr}}} - Translate characters
**Works only on stdin, does not alter the file, only the data stream
**Two arguments for translating characters  (set1/from)  (set2/to)
**Input characters in ''//set1//'' are mapped to corresponding characters in ''//set2//''.
**If the length of the two sets are unequal:
***''//set1//'' larger than ''//set2//'': ''//set2//'' is extended to the length of ''//set1//'' by repeating ''//set2//'''s last character as necessary.
***''//set2//'' larger than ''//set1//'': Excess characters in ''//set2//'' are ignored.
**Options:
*** {{Monospaced{-d}}} : delete  (one argument for which characters to delete)
*** {{Monospaced{-s}}} : Squeeze multiple consecutive occurrences of a character down into a single instance.
** Character classes:
*** Another way to identify groups of characters
*** Page 260 & 261 in //The Linux Command Line// 
*** {{Monospaced{[:digit:]}}}
*** {{Monospaced{ [:alpha:] [:lower:] [:upper:] }}}
*** {{Monospaced{[:space:] [:blank:]}}}
**Examples:
*** {{Command{tr '[:upper:]' '[:lower:]' < /etc/printcap}}}
*** {{Command{tr '[:upper:]' '*' < /etc/printcap}}}
*** {{Command{tr -s '[:upper:]' '*' < /etc/printcap}}}
** Special characters
*** On the unix command line, {{Monospaced{''\t''}}} will represent a tab and {{Monospaced{''\n''}}} will represent a newline.  {{Command{tr}}} supports using these for substitutions.

{{Warning{''Important note:''  Most students have trouble with the {{Command{tr}}} command and interpret its actions incorrectly.  {{Command{tr}}} stands for translate, and as such it translates ''characters'' individually.  It does ''NOT'' translate strings.  There is a big difference between the two.

{{Command{tr}}} will individually translate the characters from the first argument into the characters in the second argument according to their placement.  The first character in argument 1 will be translated into the first character in argument 2.  The translation will proceed for each character in the first argument to the corresponding position in the second argument.

{{Command{cat data.txt | tr one two}}}  does not convert the string //one// to the string //two// in the output of {{File{data.txt}}}.  It converts each ''o'' to ''t'', each ''n'' to ''w'', and each ''e'' to ''o''.  Each of those characters in the output of {{File{data.txt}}} is changed individually.

When completing labs involving {{Command{tr}}}, it is important that your responses indicate these translations are happening //character by character//.  Additionally, the translation does not occur in the //file// {{File{data.txt}}}.  Our source files are not modified by the filters.  It is important to indicate the translation is occurring in the //output// of the file {{File{data.txt}}}.

Providing a response which is less than clear on these important points will be considered incorrect.}}}

!! {{Command{''sed''}}} & {{Command{''awk''}}}

I like using {{Command{''awk''}}} instead of {{Command{''cut''}}}.  Everything {{Command{''cut''}}} can do {{Command{''awk''}}} can do better.  Often our delimiters are variable lengths of whitespace, such as several spaces or several tabs.  {{Command{''cut''}}} can only delimit on a single character, but {{Command{''awk''}}}'s default delimiter is whitespace, regardless how long it is.  {{Command{''awk''}}} can also use multiple characters as a delimiter at the same time.

There's an [[Oreilly book|https://www.oreilly.com/library/view/sed-awk/1565922255/]] for just these two commands.  They're pretty powerful, but we're only going to scratch the surface.  We'll mostly work with {{Command{''awk''}}} but {{Command{''sed''}}} is good to know too.  Both come in very handy.


* {{Command{awk}}}
** {{Command{awk}}} is a fully functional programming language written for processing text and numbers.
** {{Command{tr}}} works byte by byte (1 character at a time)
** {{Command{grep}}} works line by line
** {{Command{awk}}} works field by field
** Terminology:
*** Record = line of input
*** Field = a column of data, separated by a delimiter
** basic usage:  {{Command{awk [-F//delim//] '{ action ; action ; action }' }}}
*** default action is to print the entire record
*** {{Monospaced{ ''-F'' }}} = specify alternate field separator (default is whitespace)
*** Multiple delimiters can be used.  For example, the option {{Monospaced{ ''-F'[-:]' '' }}} will set the delimiter to be either a colon or a dash.
*** ''Note:'' {{Command{cut}}} uses a single character for a delimiter where {{Command{awk}}}'s default is any amount of whitespace.  This is especially handy if a sequence of spaces or tabs is used between columns, such as in the output of the {{Command{w}}} command.
** advanced usage:  {{Command{ awk [-F//delim//] [ -v var=value ] '//pattern// { action ; action ; action }' }}}
*** //pattern// is an optional way to specify which lines to operate on
*** {{Monospaced{ ''-v'' }}} = define a variable and its value to be used within awk.  ex:  {{Monospaced{ ''-v start=10'' }}}
** Useful awk variables:
*** {{Monospaced{ ''$0'' }}} - The entire line of text
*** {{Monospaced{ ''$//n//'' }}} - The //n//^^th^^ data field of the record
*** {{Monospaced{ ''NF'' }}} - number of fields in the record
*** {{Monospaced{ ''NR'' }}} - record number
** Patterns can be (advanced use only, I will not give problems in labs or tests that require this):
*** Relational expressions  ( {{Monospaced{ ''<=, <, >, >=, ==, !='' }}} )
**** ex:  {{Monospaced{ ''$1 == $2'' }}}
*** Regular expressions /regex/
**** Must be enclosed in {{Monospaced{ ''/ /'' }}}
**** When specified, the regex must match somewhere on the line.  example: {{Monospaced{ ''/[0-9]+/'' }}}
**** Or use a pattern matching expression ( {{Monospaced{ '' ~, !~'' }}} ) to match regex to a specific field.  example:  {{Monospaced{ ''$1 ~ /^string/'' }}}
** Examples:
*** Show only the username and tty from the output of the {{Command{w}}} command: {{Command{w | awk '{print $1 " " $2}' }}}
**** Same output, but skip the first two header lines:  {{Command{w | awk ' NR > 2 {print $1 " " $2}' }}}
*** Set the delimiter to be the string {{Monospaced{ ''", "''}}} (comma then space), then invert the first and last names: {{Command{awk -F", " '{print $2, $1}' names }}}


* {{Command{sed}}}:  Stream editor  //commands// //file(s)//
**Works mainly on streams, but can also be used to modify files in place when used with the {{Monospaced{ -i }}} option.
*** Be sure you are clear about this in your labs.  A response that indicates a change or deletion is occurring in the file will not be correct.  By default, changes are happening to the output of the file.
**We use {{Command{sed}}} to change the text in a stream.
**For each line in the //file//, check to see if it is addressed. If so, perform //command//
**[address1[,address2]] command [options]
***Addresses can be line numbers:  start[,stop]
***simple patterns:  {{Monospaced{ /pattern/ }}}
***The pattern can contain our ^ and $ anchors
***or regular expressions:  {{Monospaced{ /regex/ }}}
***Defaults to all lines if none are addressed
**Most used sed commands
*** {{Monospaced{s}}} - substitute - {{Monospaced{s/find/replace/flags}}}
**** flags:
**** {{Monospaced{g}}} - all instances on the line
**** {{Monospaced{p}}} - print lines containing a substitution
*** {{Monospaced{d}}} - delete line
*** {{Monospaced{p}}} - print line
*** {{Monospaced{y}}} - translate characters on the line (similar to {{Command{tr}}} command)
**Options:
*** {{Monospaced{-n}}} : suppress normal behavior and only show lines addressed and given {{Monospaced{p}}} command.
**sed examples:
*** {{Command{sed 7p file1}}} - print all lines, with line 7 printed twice (notice absence of {{Monospaced{-n}}} option)
*** {{Command{sed '7d' file1}}} - delete line 7 from the output
*** {{Command{sed '/pattern/d' file1}}} - delete all lines containing //pattern// from the output
****Pattern can contain ^ and $ anchors and [sets]
****[sets] examples:  [abc]  [aeiou]  [~A-Z]  [a-z]  [A-z]   [0-9]
*** {{Command{sed -n '1,6p' file1}}} - only print lines 1 through 6 (notice the inclusion of the {{Monospaced{-n}}} option)
*** {{Command{sed 's/Sam/Bob/' file1}}}  -  All lines with Sam changed to Bob  (just once)
*** {{Command{sed 's/Sam/Bob/g' file1}}}  -  All lines with Sam changed to Bob  (all matches on the line)
*** {{Command{sed 's/Sam/Bob/gp' file1}}}  -  All lines with Sam changed to Bob  (all matches on the line).  Notice the lack of the {{Monospaced{-n}}} option.
*** {{Command{sed -n 's/Sam/Bob/gp' file1}}}  -  All lines with Sam changed to Bob  (all matches), only printing lines where the substitution occurred
*** For addressing lines, {{Monospaced{$}}} = last line in the output

{{Note{''Note:'' Always put your awk & sed commands (the first argument), within single quotes, for example:  {{Command{sed -n '4,6p' file1.txt}}} }}}


! Assignment

!! Review lab:
* [[Lab 17|labs/lab17.pdf]]

!! Complete:
*  [[Lab 18|labs/lab18.pdf]], and [[Lab 19|labs/lab19.pdf]]
** Lab 19 is not due until Saturday.
! Material

* Read, [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]]:
** grep:  Chapter 6, pp 63 
** Anchors:  Chapter 19, pp 255


! Notes

!! Pattern Matching with {{Command{grep}}}:

The {{Command{grep}}} filter is one of the most useful; I use it almost daily.  It's worth spending a little more time working with and ensuring we're all on the same page with our terminology.  ''From here on, I'm going to start being a little more strict with wording, so be thorough with your explanations.''


!!! {{Command{grep}}} - Display all lines from a file or stream containing a specified pattern
* Usage: {{Monospaced{grep //pattern// [file1] [file2] [file3] [file//n//]}}}
* Search for //pattern// in each of the specified files
* Useful options:
** {{Monospaced{''-v''}}} : Invert the match; display lines which ''do not'' contain the specified pattern
** {{Monospaced{''-i''}}} : Case insensitive search
** {{Monospaced{''-l''}}} : list only names of files which contain a match
** {{Monospaced{''-H''}}} : include file name with matched pattern
* Examples:
** {{Command{grep dog data.txt}}} - Display all lines from the file ''data.txt'' containing the string ''dog''
** {{Command{grep ssh /etc/*.conf}}} - Display all lines from files ending with ''.conf'' within the directory ''/etc/'' containing the string ''ssh''

{{Note{
!!! ''Important Notes:''
!!!! 1)  The following all have distinct meanings.  Be sure to use them properly.
* ''Line'':  The entire line
* ''String'':  A sequence of characters
* ''Word'':  A sequence of characters with whitespace or punctuation on either side or at the beginning or end of a line.
* ''Characters'':  Individual characters, not necessarily together


!!!! 2)  By default, the grep filter will display all lines which match a particular pattern or string.  Be specific when describing its actions in the labs. 
For example, if you are asked to describe what the following command is doing:
<<<
{{Command{grep ab data.txt}}}
<<<
and your response is something vague and generic like "//finds ab in the file//" you will not receive full credit.  Be ''thorough and specific''!  What happens when a match is found?  Which file is being examined?  Where is the output going?  I've allowed vague descriptions in previous labs, but that must end as our familiarity with these tools is increasing.

A proper response will cover all points:

Display to ''STDOUT'' all ''lines'' containing the string ''ab'' from the file ''data.txt''


!!!! 3)  If multiple commands are chained together, don't just itemize what each command in the pipeline is doing.  Be sure to also describe its final outcome.  We must appreciate the big picture as well.
}}}


Chapter 6, pp 63 has more information on the grep command.


!!! Anchors:

When trying to match a pattern in a tool like grep, anchors allow us to specify where on the string a pattern must occur.  This is useful if we're trying to match something which appears at either the beginning or end of a line instead of somewhere in the middle.  

Anchors can be utilized with two anchor metacharacters:

* ^ = begins with
* $ = ends with
* Examples:
** {{Command{grep '^string' data.txt}}} - Display lines from the file ''data.txt''  beginning with ''string''
** {{Command{grep 'string$' data.txt}}} - Display lines from the file ''data.txt''  which end with ''string''

Chapter 19, pp 255 contains more information on anchors.



! Assignment

* Complete:
** Labs:  [[Lab 20|labs/lab20.pdf]] & [[Lab 21|labs/lab21.pdf]]

If you have time, peek ahead to the [[Week 6, Part 1]] page and check out lab 22.  Lab 22 is a little tricky, so some extra time to chat about it in Discord may be helpful.
Let's take some time to slow down a bit, review old labs, and catch up.
! Review:

!! I/O Practice

Lab 22 is a practice lab for I/O and moving output from one command to another.  It will leverage material from the last two weeks to solve a real-world problem and is a good example of using these tools and concepts.  This lab will introduce the openssl command with a couple examples and then ask you to use it to return useful data.

!! Complete:
* [[Lab 22|labs/lab22.pdf]]


! Material:

!! Quoting:
* Read Chapter 7, pp 75-79 (Quoting) in [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]].
* Watch:  Linux shell quotes: https://www.youtube.com/watch?v=1Ukw0IjGKsI

This half of the week is lighter than the second half.  Getting a head start on [[Week 6, Part 2]] would be wise.


! Notes:

!! Escaping & Quoting:

!!! Quoting - ' " \

Some special characters, such as the space and most symbols, have a special meaning to the shell. Occasionally we need to use those special characters literally without allowing the shell to interpret their special meanings.

Quoting allows us to protect these special characters from the shell. It is necessary in order to use a metacharacter literally, to disable its special shell meaning.

For example, consider the scenario where you need to display the contents of a file which contains a space in the name.  The space has a special meaning to the shell; it is our argument separator.

If my file is named {{File{my notes.txt}}}, and I try to execute the command {{Command{cat my notes.txt}}} to display it, the space in the file name will cause cat to try to display the file {{File{my}}} and the file {{File{notes.txt}}}, neither of which actually exist.

I need to protect that special symbol, the space, from the shell to ensure the cat command gets it.  There are three ways I can do so:

* {{Command{cat "my notes.txt"}}}
* {{Command{cat 'my notes.txt'}}}
* {{Command{cat my\ notes.txt}}}

Each of the options works a little differently.  Knowing these differences allows you to choose the best method for the task.


!!!! Three ways to quote:

* Backslash ({{Monospaced{\}}}) - Changes the interpretation of the character that follows
** {{Monospaced{\}}} is the escape character, which will disable special meaning of a shell special character.
** Converts special characters into literal characters and literal characters into special characters
** n vs \n
*** The {{Monospaced{\}}} will //enable// the special meaning of a regular character.  
*** Newline - {{Monospaced{\n}}}
*** Tab - {{Monospaced{\t}}}
** {{Command{printf "Home is %s\n" $HOME}}}
** {{Monospaced{\}}} followed by return - suppress the special meaning of the return key
* Double Quote (weak) - Will remove the special meaning of //some// metacharacters
** {{Monospaced{"}}} quoting will still evaluate variable, command, and history substitution.
* Single Quote (strong) - Will remove the special meaning of //most// metacharacters
** {{Monospaced{'}}} is stronger than {{Monospaced{"}}}, which means it will protect more metacharacters from the shell
** {{Monospaced{'}}} quoting will only evaluate history substitution in //some// shells.  The single quote will not evaluate history substitution in {{Command{bash}}}
* You can alternate quotes to include the other type: {{Command{echo "Today's date is `date`"}}}

You can read about them in Chapter 7, pp 75-79 (Quoting) in [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]] or watch this video:  [[Linux shell quotes|https://www.youtube.com/watch?v=1Ukw0IjGKsI]].


! Assignment:

!! Complete:
* Labs:  [[Lab 22|labs/lab22.pdf]] & [[Lab 23|labs/lab23.pdf]]
! Material

* Read Chapter 10 in [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]].
* Watch:
** [[Linux Terminal 101: Controlling Processes|https://www.youtube.com/watch?v=XUhGdORXL54]]
** [[Linux Terminal 101: How to View Processes|https://www.youtube.com/watch?v=Udr-qE0NEO0]]

! Notes

We're going to take a break from filters and managing text for a moment to review some system utilities and concepts.  Our next material contains information on process management and job control.  This will become especially useful once we start shell scripting and managing systems.  This material will assist you with running multiple simultaneous tasks on the system and monitor system resources to ensure your scripts are not impacting performance.

Read Chapter 10 in [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]].

!!! Note:
* In Chapter 10 they refer to the practice command {{Command{xlogo}}} which is not installed on the shell server.  Instead of using that command, run {{Command{less /var/log/messages}}} instead.  
* Don't get too bogged down on the Signals section, pages 117 through the top of page 120.  We'll revisit this later.

Watch: 
* [[Linux Terminal 101: Controlling Processes|https://www.youtube.com/watch?v=XUhGdORXL54]]
** Use a command like {{Command{less}}} or {{Command{tail -f}}} to test her examples.  We do not have {{Command{gedit}}} available.
* [[Linux Terminal 101: How to View Processes|https://www.youtube.com/watch?v=Udr-qE0NEO0]]


''Processes'' and ''Jobs'' have simple, fundamental differences:
 - Processes refer to all tasks currently running on the Linux system.  Every running task is considered a process and has a unique process ID.
 - Jobs are relative to your current login session.  They refer to current tasks you are running in that shell instance that may be running in the background, running in the foreground, or paused.  This presents an easy way to run multiple tasks at the same time from the same shell session or easily move between two tasks (eg: a text editor and a man page)


These are my notes from the last time I taught this class as a lecture.  Be sure to also read the chapter.

!! Processes
Everything on the system is represented by either a file or a process - something that physically exists or something that's running.
A process is a program that is executing
Files provide us with data, processes make things happen
The kernel manages processes - all processes are assigned a Process ID number (PID)
The kernel also maintains a process table to keep track of everything, indexed by that PID.

Processes are a mix of system and user processes.  In the process list, kernel processes are listed within  [ ]
The kernel contains a scheduler to manage the workload and share system resources
The scheduler chooses a waiting process and grants it a short slice of time to run along with any needed resources (processor, memory, I/O, other devices, etc)
If the process is not completed within its time slice, it goes back into the scheduling list and waits for additional processing time
time slice = short interval a process is allowed to run for.  

Every process is created (forked) from another process
Killing the parent process will (in most cases) kill the child processes
When a process dies or completes, its resources are reallocated
init/systemd process, PID 1 - parent to all processes on the system
created early in the boot procedure to set up the kernel and complete booting functions

!! ps command
*ps - process status
*will show useful process information
*BSD options versus UNIX options
* These are some BSD options (also available in Linux)
**{{Command{ps}}} - Show processes for your current login session
**{{Command{ps -a}}} - Show all user processes
**{{Command{ps -u}}} - Display additional process information
***%CPU - percentage CPU usage
***%MEM - percentage memory usage
***VSZ - virtual size in Kbytes
***RSS - resident set size
***TT - control terminal name (tty//x////x//)
***STAT - symbolic process state
***TIME - accumulated CPU time, user + system
**{{Command{ps -x}}} - Display system processes
**{{Command{ps -aux}}} - Show extended information on all processes  - This is often the most useful way to use the command.
**{{Command{ps -U //username//}}}  - Display processes associated with //username//
**{{Command{ps -P //PID//}}}  - Display processes associated with process ID //PID//

*top - display and update information about the top cpu processes
**THR - number of threads in the process
**PRI - current priority of the process
**NICE - nice  amount  (in  the  range -20 to 20)
**SIZE - total size of the process (text, data, and stack) (in K)
**RES - current amount of  resident memory (in K)
**STATE - current process state (START, RUN, SLEEP, STOP, ZOMB,  WAIT, LOCK)
**C -  processor number on which  the  process  is  executing
**TIME - number of system and user cpu seconds that the process has used
**WCPU - weighted  cpu percentage

!! Killing Processes
*kill [-signal] //pid//
* Common Signals:
| !Signal Number | !Signal Abbreviation | !Description |
| 1 | HUP |Hangup (restart process, reload config)|
| 2 | INT |Interrupt (~CTRL-C)|
| 3 | QUIT |Quit|
| 9 | KILL |Immediate kill.  Not catchable or ignorable.|
| 15 | TERM |Request to gracefully terminate (default)|
SIGINFO = ~CTRL-T  (~FreeBSD Only)

* Killing large groups of processes
**{{Command{pkill}}} & {{Command{pgrep}}} - look up or signal processes based on name and other attributes
**{{Command{killall}}} - kill processes by name
*** This may not come installed by default in all Linux distributions.

[>img[img/jobs.png]]
!! Jobs & job control

*Jobs - a command or set of commands entered in one command line.
*jobs are related to the user's session and are not global.
*STDIN is locked while a job is running in the foreground - it is only available to the current job until it completes.
*running background jobs allow the user to access these resources and have control of the shell.
*background jobs will still send their output to the screen and must be brought back to the foreground if they request input from the user
*a job may have one of three states - foreground, background, and stopped.

* append ''&'' to the command string to run it directly to the background
*~CTRL-Z - suspend a running foreground process
*Related commands:
** {{Command{jobs}}}
** {{Command{fg}}}
** {{Command{bg}}}
** {{Command{kill -STOP %//id//}}}
* Manipulate jobs with ''%'' and the job number
** Examples:  {{Command{fg %1}}} - resume the first background job in the foreground
** {{Command{bg %2}}} - resume the second job in the background
*currency flags: ''+'' and ''-''
** ''+'' most recently accessed job, default job if no arguments are specified.
** ''-'' second most recently accessed job, default job when ''+'' flagged job completes.

!! Two additional recommended videos:

* Kill command video: https://www.youtube.com/watch?v=fT-h45L9RAY
* Difference between processes and jobs: https://www.youtube.com/watch?v=eqtiw8S8GZw


! Assignment

* Complete [[Lab 24|labs/lab24.pdf]]
! Material

!! History Substitution:
* Chapter 8 - The entire chapter has good info, but pay particular attention to pages 85 (bottom), 86 & 88
* Watch: Linux History Explained: https://www.youtube.com/watch?v=MbXofShhMv8

!! Variable Substitution:
* Read: Chapter 7, pp 74-75 and Chapter 25, pp 377-378
*Watch:  Linux Shell Variables:  https://www.youtube.com/watch?v=3BZzFRPYU_I

!! Command Substitution:
* Read: Chapter 7, pp 74-75
* Watch: Command substitution using backticks: https://www.youtube.com/watch?v=VOOeXV4HYSA


! Notes

!! Shell Substitutions

Substitutions are transformations the shell performs on input before a command string is fully executed.  When the Unix shell encounters a substitution metacharacter, it will evaluate it to perform any substitutions before executing the full command string. These substitutions allow us to expand filenames, evaluate variables, recall previous commands, or use the result of one command as an argument to another. We already discussed filename substitution (file globbing). History substitution is very useful for recalling previous commands without having to retype them. Variable and command substitution are used extensively in shell scripting and have a useful place on the command line.

As you work with these substitutions, keep in mind the echo command can be used to preview the command string the shell will be executing after all substitutions are performed.  Simply start your command string with {{Command{echo}}} to test it.  We did this in Lab 23, #4 with the cat dog rabbit wombat question.


!!! History substitution

History substitution allows us to quickly recall previously executed commands. Previous commands are saved in a buffer which is written to the file ~/.bash_history upon logout. This allows history to be preserved across sessions and is useful for an administrator who needs to inspect activity of users on the system.

* Read:
** Chapter 8 - The entire chapter has good info, but pay particular attention to pages 85 (bottom), 86 & 88
*Watch:
**Linux History Explained:  https://www.youtube.com/watch?v=MbXofShhMv8


!!! Variable substitution

Variable substitution allows data to be stored for later use, much like any other programming language. The main application here is for shell configuration settings and for use in shell scripting. Variable substitution is not used as much as the other substitution forms when working directly on the command line.

* Read:
** Chapter 7, pp 74-75 and Chapter 25, pp 377-378
* Watch:
** Linux Shell Variables: https://www.youtube.com/watch?v=3BZzFRPYU_I


!!! Command substitution

Command substitution allows us to use the result of one command as an argument to another. Backticks or {{Command{$( )}}} are used to execute an inner command first. That inner command (including the backticks) is replaced by its output. The full command string is then executed.

''Important note:''  The backtick ({{Monospaced{`}}}) and the single quote ({{Monospaced{'}}}) look rather similar.  Be sure to approach this section with an eye for detail so you don't confuse the two.

Consider this example. I often work remotely and need to remotely power on my home Windows PC to retrieve some files or continue working with them. The wake-on-LAN function built into many motherboards allows for remote wake-up by broadcasting a specially crafted packet containing the system's MAC address to the broadcast address of the local subnet. Unix utilities exist to facilitate this. Their syntax is usually {{Command{//command// //~MAC-address//}}}.

I log into my home unix fileserver from a remote location via SSH. I have my PC's MAC address saved in a text file within {{File{/tmp/}}}:

{{{
# I can see that my PC's MAC address is saved in the text file named win7
root@trillian:/tmp # cat win7
c7:62:00:a2:25:55

# Rather then copy and paste, command substitution is a faster way to get that MAC address added to the command line as an argument to the wake command.  
# The shell will first perform the substitution, replacing `cat win7` with the output of the cat command.  Next, the full command string will be executed.
root@trillian:/tmp # wake `cat win7`

# I can preview the full result of my substitution by prefixing the command string with echo to see what will really be executed by the shell
root@trillian:/tmp # echo wake `cat win7`
wake c7:62:00:a2:25:55
}}}

An even better way involves combining history and command substitution:

{{{
# Preview my file, make sure the MAC address looks good
root@trillian:/tmp # cat win7
c7:62:00:a2:25:55

# History substitution will be used first to replace !! with the last executed command (cat win7).  
# Next, command substitution will replace the backticks with the result of executing the enclosed command (the MAC address)
# Finally, the full wake command string with the MAC address added as an argument will wake up my Windows PC.
root@trillian:/tmp # wake `!!`
}}}

* Read:
** Chapter 7, pp 74-75
* Watch:
** Command substitution using backticks: 
*** https://www.youtube.com/watch?v=VOOeXV4HYSA


! Assignment

!! Complete:
*  Complete [[Lab 25|labs/lab25.pdf]] & [[Lab 26|labs/lab26.pdf]]
! Material

!! The {{Monospaced{vi}}} Editor

{{Monospaced{vi}}} is the standard Unix text editor.  Extremely powerful and available on every system.

!!! Read :
 - Chapter 12 in [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]]
 - Review notes on [[The vi Editor]] page

!! The {{Monospaced{nano}}} Editor

{{Monospaced{nano}}} is easier to use than {{Monospaced{vi}}}, but not nearly as powerful.


!!! Read
 - Chapter 11 in [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]] beginning on the bottom of page 136


!! Shell scripting

!!! Read:
 - Chapter 24 and 27 in [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]]

!!! Watch:
 - [[Bash scripting basics|https://www.youtube.com/watch?v=NSu4IWlOU7k]]
 - [[Exit Status|https://www.youtube.com/watch?v=Fghnnrbag-w]]
 - [[If statements 1|https://www.youtube.com/watch?v=elrbjYdL-8c]] & [[If statements 2|https://www.youtube.com/watch?v=XDi9W0V-ibA]]


! Notes

!! Unix text Editors

The two main Unix command-line text editors are {{Command{vi}}} and {{Command{nano}}}.  The {{Command{vi}}} editor is the standard and is universally available on every Unix system.  It's extremely powerful, but has a bit of a learning curve and takes some time to get used to it.  The {{Command{nano}}} editor isn't as universally available, but can be installed on most Unix systems, is easier to learn, but not nearly as powerful.

If you will be working with the Unix command line in the future, especially professionally, becoming familiar with {{Command{vi}}} will be worthwhile.  Otherwise {{Command{nano}}} will be sufficient for this course.  

I have material available on the {{Command{vi}}} editor on [[this page|The vi Editor]] if you would like to review it.  There is an extra credit lab available there as well.

Using one of the text editors will be necessary to complete the next material:  shell scripting.  Pick whichever one you'd like.


!! Basic Script Building

We don't have enough time to go deep into Shell Scripting, but it's an important topic to at least mention.  We'll spend this week on the highlights.  All of the chapters in Part 4 are worthwhile if you have the time and interest in further developing your shell scripting skills.  The [[Linux Command Line and Shell Scripting Bible|https://www.amazon.com/Linux-Command-Shell-Scripting-Bible/dp/111898384X]] is another good resource I've used for other classes.


Watch: [[Bash scripting basics|https://www.youtube.com/watch?v=NSu4IWlOU7k]]


!!! Outline
A shell script is an executable file that contains a list of shell commands
* It's an automated way to run a series of commands.
* Need to run a bunch of commands often?  Script it.
* Need to run a few complicated commands only occasionally?  Script it.

The file is interpreted by the shell specified, it is not compiled

There are [[two main shell families|img/shells.jpg]] - bourne and ~C-Shell
* We will be writing our scripts for the bourne/bash shell.
* The first line of the script contains the shebang, {{Command{#!}}}, specifying the path for the interpreter to use.  

A script could be as simple as a single command or a series of commands
eg: Instead of having to type out the find command to scan our home directory for old, large files, we can put it in a script.

{{{
#!/bin/sh
# ./oldcrud.sh

find ~ -atime +52w -size +50M -ls
}}}

- or -

{{{
#!/bin/sh
# ./oldcrud.sh
# This version accepts a command line argument to delete the files it finds.

if [ "$1" = "-d" ]
then
	find ~ -atime +52w -size +50M -delete
else
	find ~ -atime +52w -size +50M -ls
fi
}}}

!!! Executing a shell script:
* Scripts must at least be readable in order to run.  
* A program not in your path must be executed by prefixing the filename with path information, eg:  {{Command{./script.sh}}}
** Execute permission is required when executing scripts in this fashion.
* Scripts can also be executed as arguments to an interpreter, eg:  {{Command{sh script.sh}}}
** If the file is prefixed with the name of the interpreter, only read permission is required.

Three options to run a script:
# Place in $PATH (needs read & execute)
# {{Command{./scriptname.sh}}} (needs read & execute)
# {{Command{sh scriptname.sh}}} -or- {{Command{bash scriptname.sh}}} (needs only read)


!!! Variables
User variables can be set with {{Monospaced{''=''}}}
{{Monospaced{variable=value}}}  (''No spacing!''  Adding a space will result in a syntax error)
and referenced with {{Monospaced{$variable}}} or {{Monospaced{${variable} }}}.  Useful for supplying text immediately after the variable expansion:  {{Monospaced{echo ${size}mb }}}
{{Monospaced{variable=$(command)}}} or {{Monospaced{variable=`command`}}}  can save output from a command to the variable, eg: {{Monospaced{files=$(ls | wc -l)}}}
 -- We will be using a lot of command substitution.
{{Command{unset variable_name}}} - remove a variable
The {{Command{read}}} command will accept input from the keyboard:  {{Command{read //variablename//}}}
 -- Use {{Command{read}}} with the ''-p'' option to supply a prompt.
{{Command{export //variable_name//}}}  - export a variable to the shell for future use
{{Command{readonly //variable_name//}}} - prevent variable from being changed

Example script.  Prompt for user input and then compute a value.
{{{
#!/bin/sh
# ./nums.sh

read -p "Enter a number: " number1
echo you entered $number1

echo -n "Enter a number: " 
read number2
echo you entered $number2

echo
echo -n "Sum is: "
expr $number1 + $number2
}}}


{{Note{It's a best practice for variable names to describe what they contain.  Notice I used {{Monospaced{''$number1''}}} and {{Monospaced{''$number2''}}} for my variables above.  These variable names both describe what should be stored within them.  Using non-descriptive variable names such as {{Monospaced{''$a''}}} or {{Monospaced{''$var1''}}} will result in lost points.}}}


!!! Exit Status

Next we're going to begin to introduce three inter-related concepts: command exit status, the test command, and if statements in that order. We're going to use them in practice in the reverse order.

When constructing an if statement, most often we'll be using the test command to evaluate the conditions (eg, whether a file exists, or whether one number is greater than another). The test command will return an exit status to indicate whether that evaluation was true or false. The if command will then use that exit status to determine what to do.

Every command we execute on the system returns an exit status to indicate whether it ran successfully or not. The exit status is stored in the special shell variable {{Monospaced{''$?''}}}.

Exit status values fall within the range 0 - 255. An exit status of 0 always indicates success. An exit status greater than 0 indicates some form of failure. Having many possible values to indicate failure (any positive integer) allows the program to indicate either the type of failure or where the failure occurred.

Notice the difference between the two executions of the {{Command{id}}} command:

{{{
[user@shell ~]$ id root
uid=0(root) gid=0(root) groups=0(root)
[user@shell ~]$ echo $?
0

[user@shell ~]$ id root2
id: root2: no such user
[user@shell ~]$ echo $?
1
}}}

The first instance completed successfully, so we received an exit status of 0. The second instance returned an unknown user error and an exit status of 1.

Watch:  https://www.youtube.com/watch?v=Fghnnrbag-w


!!! Useful commands for shell scripting

!!!! {{Command{test}}} command:

The {{Command{test}}} command (also known as {{Command{[}}}), allows us to perform comparisons, check strings, or evaluate files on the system. It works by returning an exit status that an if statement checks to determine whether something completed true (successful, exit status of 0) or false (not successful, exit status > 0).
''Note:'' The test command has two names: {{Command{test}}} and {{Command{[}}} (square bracket). Both files exist on a unix system and you may see scripts written using either. {{Command{[}}} is the common way to represent the command. When the {{Command{[}}} is used, a closing ] must be placed at the end of the line. Remember: The {{Command{[}}} is an actual command! And like all commands, spaces must separate the command from its arguments.

With this example, we're first checking to see whether an item is a regular file. It fails (returns an exit status of 1) because it is not a regular file. Next we check to see whether the item is a directory. An exit status of 0 indicates success, confirming the item is a directory.

{{{
[user@shell ~]$ [ -f /tmp/lab23 ]
[user@shell ~]$ echo $?
1

[user@shell ~]$ [ -d /tmp/lab23 ]
[user@shell ~]$ echo $?
0
}}}

The {{Command{test}}} manpage will be a great resource for learning about the different comparisons or tests available.

{{Warning{''Warning:''  be sure to use quotes with string evaluation }}}


!!!! The {{Command{expr}}} command can evaluate an expression
perform integer math & comparisons
verify string input against a regular expression

:expr 5 + 5


!!! Control structures:

Shell scripts can utilize control structures common to all programming languages. This allows us to construct more complex scripts which can evaluate conditions or iterate over lists. The most basic of our control structures is the if statement. An if statement has three parts:
* the initial if test
* followed by one or more optional elif statements
* and ending with an optional else condition.

If statement synopsis:
{{{
if condition
then
   commands
elif condition
then
   commands
else
   commands
fi
}}}

If statement example:
{{{
#!/bin/sh
# ./exists.sh

if [ $# -ne 1 ]
then
	echo give me a file
	exit 1
fi

if [ -f $1 ] 
then
	echo "it's a regular file"
elif [ -d $1 ]
then
	echo "it's a directory"
elif [ -e $1 ]
then
	echo "don't know what this is"
else 
	echo "it doesn't even exist"
fi
}}}



!!! Script Writing

When writing your scripts, the following header ''must'' be placed at the top of the file, immediately after the shebang:
{{{
#!/bin/sh
# File name:
# Author:
# Date Written:
# Assignment:
# Purpose:
# Description:
#
#
#
}}}


!! Tips and good habits to start developing now:  
* Comment your script with {{Monospaced{#}}}.  Comments throughout your script make it easier for others to understand what's going on.
* Long lines should be wrapped. Cut long lines at about column 60. (makes it easier to read and print)
* Using meaningful variable names makes it easier to understand their purpose.  Use of generic variable names (eg: {{Monospaced{var1}}}) is bad form and will result in lost points.
* Use proper indentation within control structures.  Indenting your code, like done in the if-statements above, will make it easier to understand the logic and flow of your scripts.  This will also make it easier to spot logic errors.

! Assignment:

!! The vi Editor:
 - Complete [[Lab A1|labs/labA1.pdf]]
 - This lab is optional for additional vi practice and will be accepted for extra credit.  There is no firm due date, but please try to submit before the end of the month.

!! Shell scripting:
- Complete [[Lab 31|labs/lab31.pdf]] & [[Lab 32|labs/lab32.pdf]]


//''Note''//: Labs 27 - 30 were skipped.
! Material

!! Read:
* Chapter 28 - Reading Keyboard Input
* Chapter 32 - Positional Parameters

The [[Shell scripting best practices]] page will have useful information as you're writing your scripts.  These best practices will be considered when assigning grades.  Be sure to note the shell scripting grading rubric outlined at the top of [[Shell script submission requirements]].


! Notes

This portion will cover four main concepts:

 - Obtaining input from the user
 - Positional Parameters (obtaining input from command-line arguments)
 - for loops
 - Input Validation (optional)

!! Obtaining input from the user:

Chapter 28 covers the {{Command{read}}} command, along with a lot of advanced detail that is beyond the scope of this class.  

The {{Command{read}}} command will accept input from the keyboard and save it to the specified variable:  {{Command{read //variablename//}}}
 - Use {{Command{read}}} with the ''-p'' option to supply a prompt.

Examples: 

{{{
#!/bin/sh
# ./nums.sh

read -p "Enter a number: " number1
echo you entered $number1

echo -n "Enter a number: " 
read number2
echo you entered $number2

echo
echo -n "Sum is: "
expr $number1 + $number2

read -p "Enter two numbers: " number1 number2

echo
echo -n "Sum is: "
expr $number1 + $number2
}}}


!! Positional Parameters

The read command will allow us to prompt for input.  Positional parameters (variables) will allow our scripts to obtain input from command-line arguments.  Chapter 32 will discuss them in more detail.


Special shell variables:
| !Variable | !Description |
| $0 |Name of the script|
| $1 - $9 |Values of command line arguments 1 - 9|
| $# |Total number of command line arguments|
| $* |Value of all command line arguments|
| $@ |Value of all command line arguments; each quoted if specified as "$@"|
| $? |Exit status of most recent command|
| $$ |Process ID of current process|
| $! |PID of most recent background process|


Command line arguments can be passed to a shell script and stored in $1 through $9
{{{
#!/bin/sh
# ./vars.sh
echo $1 - $2 - $3
echo All command line arguments: $*
echo Number of command line arguments: $#
echo Name of the current command: $0
echo Exit status of the previous command: $?

[root@shell ~]$ sh vars.sh first orange third wolf
first - orange - third
All command line arguments: first orange third wolf
Number of command line arguments: 4
Name of the current command: vars.sh
Exit status of the previous command: 0
}}}


! Assignment

!! Complete
* Complete [[Lab 33|labs/lab33.pdf]] & [[Lab 34|labs/lab34.pdf]] 
* Complete [[Lab 35|labs/lab35.pdf]] - ''Due Wednesday, Mar 13''
** ''Be sure to follow the [[Shell script submission requirements]] to submit your shell script labs''

{{Note{''Note:'' Lab 34 pro tip - avoid typing out long commands or scripts.  Typing out long commands manually is a great way to introduce typos and break things.  Use copy/paste instead.  If you haven't noticed yet, the [[Linux Shortcuts]] page covers how to copy/paste in putty/Linux.}}}
! Version Control with git

!! Online videos

At least watch these two:
* Fast Intro: https://www.youtube.com/watch?v=hwP7WQkmECE
* A little more detailed intro: https://www.youtube.com/watch?v=USjZcfj8yxE

''Note:''  We have git already installed on the class shell server.  There is no need to install it on your system.

Feel free to create a scratch space in your home directory if you would like to work along with the video.


----
Optional, if you'd like even more detail
* Part 1: https://www.youtube.com/watch?v=hrTQipWp6co
* Part 2: https://www.youtube.com/watch?v=1ibmWyt8hfw
** When working with our ~GitHub clone, don't mess with the usernames, ~URLs, tokens, or SSH keys like the video is suggesting.  The documented steps below will work for you.
* Part 3: https://www.youtube.com/watch?v=Q1kHG842HoI

''Two additional notes about these three videos'':
# You don't have to create a ~GitHub account.  We will be working with our own ~GitHub clone that will mirror a lot of its functionality.
##  If you'd like to create an online repository and follow along with the video, scroll down to the //Setting up your Git repository// section below for how to access your account on our clone.  Just use a different name for your demo repo.
# He uses a GUI IDE tool for code development where we use the command line.  These videos are more to explain the concepts.  The directions below will explain the methods for how to apply these concepts to the command line.
----



!! Introduction to Version Control

Version control is a system that allows you to manage changes to files and code over time. It's an essential tool for anyone who works with code or digital content, as it provides a way to track changes, collaborate with others, and restore previous versions of your work. With version control, you can work on a project with others without worrying about conflicting changes, easily revert to a previous version of your work, and keep track of who made changes and when. 

While its primary use may be for programming projects, it's also useful for any application that involves tracking the evolution of changes to plain text files, such as:

* My resume and lab manuals for other classes are written in ~LaTeX, a plain text markup language, which can then be tracked in a version control repo
* I use the Markdown language for some technical documentation.  This gives basic formatting while keeping the documents plain text
* Our class website is a single HTML file that is checked in to a repo as changes are made.  The comment posted to the repo is also sent as a notification to Discord.
* Many of my server configuration files are tracked in repos
* Infrastructure as Code - text files can be used as the basis for creating and configuring server and network infrastructure
* Diagram as code - python can be used for generating diagrams: https://diagrams.mingrammer.com/


!! Introduction to Git

Git is a free and open-source version control system that is widely used in software development. It was created by Linus Torvalds, the creator of the Linux kernel, and designed to handle projects of all sizes, from small personal projects to large-scale enterprise applications.

Git allows developers to track changes made to code and collaborate on projects with other developers. It does this by creating a repository, which is a directory that contains all files and version history of a project. Developers can make changes to the code, save those changes in a commit, and then push those changes to a remote central repository. Git also allows developers to create branches, which are separate lines of development that can be merged back into the main codebase.

Benefits of a central version control system are:

* ''Collaboration'': Git makes it easy for developers to work collaboratively on a project. Multiple developers can work on the same codebase at the same time, making changes to the same files without interfering with each other's work. Git keeps track of all changes made to the codebase, making it easy to merge changes and resolve conflicts.
* @@''Version Control''@@: Git allows developers to keep track of all changes made to the codebase over time. Each commit in Git represents a snapshot of the code at a particular point in time. This makes it easy to roll back to a previous version of the code if necessary.
* @@''Backup''@@: Git provides a backup system for codebases. All changes made to the codebase are saved in a repository, even if they are later undone. This means that developers can always go back to a previous version of the code if necessary and easily compare different versions of code.
* ''Experimentation'': Git makes it easy to experiment with different ideas without affecting the main codebase. Developers can create branches to work on new features or ideas, and merge them back into the main codebase if they are successful.
* ''Flexibility'': Git can be used for any type of project, regardless of its size or complexity, on any operating system. It is a flexible tool that can be adapted to suit the needs of any project.

We will focus on the concepts above highlighted @@in yellow@@


!!! Basic Git terminology

* @@''Repository''@@ - A repository is a directory where Git stores all files and version history of a project.
* @@''Commit''@@ - A commit is a saved snapshot of the changes made to a file or files in the repository.
* ''Branch'' - A branch is a separate line of development that allows multiple developers to work on the same project without interfering with each other's work.
* ''Merge'' - Merging is the process of combining two branches to create a single branch with the changes from both branches.
* @@''Remote''@@ - A remote is a Git repository that is hosted on a remote server, such as ~GitHub or ~GitLab.
* @@''Clone''@@ - Cloning is the process of copying a repository from a remote server to a local machine.
* @@''Push''@@ - Pushing is the process of uploading changes made to a local repository to a remote repository.
* ''Pull'' - Pulling is the process of downloading changes made to a remote repository to a local repository.
* ''Fork'' - Forking is the process of creating a copy of a repository on a remote server, which can be used for experimentation or collaboration.
* ''Checkout'' - Checking out is the process of switching between different branches or commits in a repository.

We will focus on the terms above highlighted @@in yellow@@


!!! Basic Git Commands
* {{Command{git add //file(s)//}}} - Add a file to a repository
* {{Command{git rm //file(s)//}}} - Remove a file from a repository
* {{Command{git mv //oldfile// //newfile//}}} - Rename a file in a repository
* {{Command{git commit //file(s)//}}} - Record any changes to the repository
* {{Command{git status}}} - Display the status and information about the working tree
* {{Command{git log}}} - Display the commit logs
* {{Command{git remote}}} - Set the location of the remote repository
* {{Command{git push}}} - Send committed changes to the remote repository
* {{Command{git diff}}} - Compare changes between commits as well as between the current working version and committed version of a file.

Linux manpages are available for each of these {{Command{git}}} sub-commands.  Put a dash between the main command {{Command{git}}} and the sub-command you want to learn more about.  For example, {{Command{man git-add}}}


!!! ~GitHub

Git and ~GitHub are related but different tools.  

''Git'' is the command-line tool that allows developers to create a repository, make changes to code, and save those changes in commits. Git is designed to work locally on a developer's machine, but it can also be used to collaborate with other developers using remote repositories.

''~GitHub'' is a web-based platform that provides central hosting for Git repositories. It allows developers to create and host remote repositories, collaborate with other developers, and manage projects. ~GitHub provides a user-friendly interface that makes it easy to manage Git repositories.

~GitHub provides developers with additional features, including:
* Cloud storage for your repository
* Web-based interface for managing repositories and projects
* Collaboration features, including pull requests, code review, and issue tracking
* Documentation features, such as a project wiki
* Integration with other tools, including CI/CD pipelines, text editors, and project management tools
* Social features, including profiles, followers, and open-source contributions

We will work with a local, self-hosted clone of ~GitHub called //Gogs//.  Everyone will have an account on our local clone where you can create your own repositories.


!! Setting up your Git repository

* Log into our Gogs instance at https://git.ncs205.net:3000/user/login
** Use your campus username
** Your initial password can be found in the file {{File{~///username//.git.txt}}}
*** //username// is your campus username
** You may change your password after logging in.  Be sure to record it for future use.
* Create a new repository from within Gogs.  Click the ''+'' within the orange box
[img[img/gogs01.png]]

* Call your new repository //''scripts''//, configured as follows.  Add a description if you'd like.
[img[img/gogs02.png]]

* You should now have an empty repository in our online Git server.  We must now clone it on our class shell server and add our script files.

{{Warning{''Note:'' If this was a real project, we would most likely clone with SSH and utilize an SSH keypair for authentication.  This is a bit more work to set up, but is more secure and easier to use going forward.  We will instead choose the HTTPS method now and sacrifice some security for ease of use while we're learning new git concepts.  Just be aware some of the authentication choices we are making now are only to allow us to focus more on git and not get distracted with more advanced SSH concepts.  Our authentication choices here are not an endorsement of these methods for future projects.  Use SSH keypairs instead for those.}}}

> Change to your {{File{bin}}} directory: {{Command{ cd ~/bin/ }}}
> A README file is a place to document your repository.  Content is recorded in [[Markdown|https://www.markdownguide.org/basic-syntax/]].  Create an empty file: {{Command{ touch README.md }}}
> Initialize the new repository: {{Command{ git init }}}
> Add the README to the repo: {{Command{ git add README.md }}}
> Add your current Lab 35 scripts to the repo: {{Command{ git add ncs205-lab35-q0?.sh }}}
> Display the status of changes awaiting commit.  You should see the README.md and your three script files listed as //new file//: {{Command{ git status }}}
> Commit your changes with the message //first commit//: {{Command{ git commit -m "first commit" }}}
> Set the remote location to push your changes to.  ''Be sure to change my username to yours.'' {{Command{ git remote add origin https://git.ncs205.net:3000/merantn/scripts.git }}}  
> Configure git to cache your credentials in memory: 
> &nbsp;&nbsp;&nbsp; {{Command{ git config &#45;-global credential.helper cache }}}
> &nbsp;&nbsp;&nbsp; {{Command{ git config &#45;-global credential.helper 'cache &#45;-timeout=3600' }}}
> ''Optional'' - If you prefer {{Command{nano}}} instead of {{Command{vi}}}, set git to use nano as the default editor: {{Command{ git config &#45;-global core.editor nano }}}
> For simplicity while we're learning, we'll do all of our work out of the master branch: {{Command{ git config &#45;-global push.default current }}}
> Send your changes to the remote repo:  {{Command{ git push }}}

Here is my run through these steps:

{{Commands{
[merantn@lab ~/bin]$ ''git init''
Initialized empty Git repository in /home/merantn/bin/.git/

[merantn@lab ~/bin]$ ''git add README.md''

[merantn@lab ~/bin]$ ''git add ncs205-lab35-q0?.sh''

[merantn@lab ~/bin]$ ''git status''
 # On branch master
 #
 # Initial commit
 #
 # Changes to be committed:
 #   (use "git rm &#45;-cached <file>..." to unstage)
 #
 #       new file:   README.md
 #       new file:   ncs205-lab35-q01.sh
 #       new file:   ncs205-lab35-q02.sh
 #       new file:   ncs205-lab35-q03.sh
 #

[merantn@lab ~/bin]$ ''git commit -m "first commit"''
[master (root-commit) 021c95d] first commit
 Committer: Nick Merante <redacted>
Your name and email address were configured automatically based
on your username and hostname. Please check that they are accurate.
You can suppress this message by setting them explicitly:

    git config &#45;-global user.name "Your Name"
    git config &#45;-global user.email you@example.com

After doing this, you may fix the identity used for this commit with:

    git commit &#45;-amend &#45;-reset-author

 4 files changed, 3 insertions(+)
 create mode 100644 README.md
 create mode 100755 ncs205-lab35-q01.sh
 create mode 100755 ncs205-lab35-q02.sh
 create mode 100755 ncs205-lab35-q03.sh

[merantn@lab ~/bin]$ ''git remote add origin https://git.ncs205.net:3000/merantn/scripts.git''

[merantn@lab ~/bin]$ ''git config &#45;-global credential.helper cache''

[merantn@lab ~/bin]$ ''git config &#45;-global credential.helper 'cache &#45;-timeout=3600'''

[merantn@lab ~/bin]$ ''git config &#45;-global push.default current''

[merantn@lab ~/bin]$ ''git push''
Counting objects: 6, done.
Compressing objects: 100% (2/2), done.
Writing objects: 100% (6/6), 416 bytes | 0 bytes/s, done.
Total 6 (delta 0), reused 0 (delta 0)
To https://git.ncs205.net:3000/merantn/scripts.git
 * [new branch]      HEAD -> master

}}}


You can now view your repository online and see the files you've just checked in:

[img[img/gogs03.png]]


!!! Committing changes to the repository

New scripts can be added to the repository with the {{Command{git add}}} command above and changes to existing files can be committed with {{Command{git commit}}}.  Adding files and checking in changes can serve as a checkpoint and backup to roll back to in case things go awry.  It's wise to periodically check in changes at important milestones or before large adjustments are to be made so you have a good checkpoint to roll back to in case things don't work out.  

Here is an example of a change being made to an existing file.  I forgot to add the shebang to the top of one of my scripts.  I will first make that change, and then check the new file into the repository:

1. Update the script.  It now contains the missing shebang:
{{Commands{
[merantn@lab ~/bin]$ ''head ncs205-lab35-q03.sh''
 #!/bin/sh
 # File name: ncs205-lab35-q03.sh
 # Author: Nick Merante
 # Date Written: Mar 4, 2023
 # Assignment: Lab 35
 # Purpose: Calculate product of two integers
 # Description:
 #   - Prompt user for input of two integers
 #   - Calculate and display product
 #
}}}

2. We can compare my new version with the version currently checked into the repo:
* Lines beginning with a {{Monospaced{''+''}}} represent additions
* Lines beginning with a {{Monospaced{''-''}}} represent deletions.
Notice the {{Monospaced{''+''}}} before the new shebang I just added:

{{{
[merantn@lab ~/bin]$ git diff ncs205-lab35-q03.sh
diff --git a/ncs205-lab35-q03.sh b/ncs205-lab35-q03.sh
index 806d2a4..bc66030 100755
--- a/ncs205-lab35-q03.sh
+++ b/ncs205-lab35-q03.sh
@@ -1,3 +1,4 @@
+#!/bin/sh
 # File name: ncs205-lab35-q03.sh
 # Author: Nick Merante
 # Date Written: Mar 4, 2023
}}}

3. Commit your change by executing one of these methods:
* Committing just the changed files you specify: {{Command{ git commit ncs205-lab35-q03.sh }}}
* Committing all changes: {{Command{ git commit -a }}}

4. A text editor will open for you to add a comment to your commit.  This is a place to describe the changes you're making.  Comments help track what changes were made and why they were made.  
* Add your comment
{{{
Adding missing shebang
# Please enter the commit message for your changes. Lines starting
# with '#' will be ignored, and an empty message aborts the commit.
#
# Committer: Nick Merante <merantn@lab.ncs205.net>
#
# On branch master
# Changes to be committed:
#   (use "git reset HEAD <file>..." to unstage)
#
#       modified:   ncs205-lab35-q03.sh
#
}}}

* Save and quit the editor to finalize your commit
{{{
[master 7690513] Adding missing shebang
 Committer: Nick Merante <merantn@lab.ncs205.net>
Your name and email address were configured automatically based
on your username and hostname. Please check that they are accurate.
You can suppress this message by setting them explicitly:

    git config --global user.name "Your Name"
    git config --global user.email you@example.com

After doing this, you may fix the identity used for this commit with:

    git commit --amend --reset-author

 1 file changed, 1 insertion(+)
}}}

5. Push your changes to the online repository with {{Command{ git push }}}

{{Commands{
[merantn@lab ~/bin]$ ''git push''
Counting objects: 5, done.
Compressing objects: 100% (3/3), done.
Writing objects: 100% (3/3), 301 bytes | 0 bytes/s, done.
Total 3 (delta 2), reused 0 (delta 0)
To https://git.ncs205.net:3000/merantn/scripts.git
   14f729c..7690513  HEAD -> master
}}}


!!! Reverting a failed change to a file in the working directory

Let's suppose I make a change to one of my files that doesn't work out.  I haven't committed that change yet, and I want to revert my working copy to the last checked-in version.

1. Make your change

For simple demonstration purposes, I added a new line to the description (highlighted yellow below).  A more common use of this would be to undo several failed changes throughout a piece of code:

{{Commands{
[merantn@lab ~/bin]$ ''head ncs205-lab35-q03.sh''
 #!/bin/sh
 # File name: ncs205-lab35-q03.sh
 # Author: Nick Merante
 # Date Written: Mar 4, 2023
 # Assignment: Lab 35
 # Purpose: Calculate product of two integers
 # Description:
 #   - Prompt user for input of two integers
 #   - Calculate and display product
 #   @@- some major goof I want to undo@@
}}}
{{{
[merantn@lab ~/bin]$ git diff ncs205-lab35-q03.sh
diff --git a/ncs205-lab35-q03.sh b/ncs205-lab35-q03.sh
index bc66030..3ffd4a5 100755
--- a/ncs205-lab35-q03.sh
+++ b/ncs205-lab35-q03.sh
@@ -7,4 +7,4 @@
 # Description:
 #   - Prompt user for input of two integers
 #   - Calculate and display product
-#
+#   - some major goof I want to undo
}}}

2. Undo that change, reverting to the last checked-in version.  Notice my addition to the Description is now missing and there are no differences between the local and checked-in copies:
{{Commands{
[merantn@lab ~/bin]$ ''git checkout &#45;- ncs205-lab35-q03.sh''

[merantn@lab ~/bin]$ ''head ncs205-lab35-q03.sh''
 #!/bin/sh
 # File name: ncs205-lab35-q03.sh
 # Author: Nick Merante
 # Date Written: Mar 4, 2023
 # Assignment: Lab 35
 # Purpose: Calculate product of two integers
 # Description:
 #   - Prompt user for input of two integers
 #   - Calculate and display product
 #

[merantn@lab ~/bin]$ ''git diff ncs205-lab35-q03.sh''
[merantn@lab ~/bin]$
}}}


!!! Comparing differences between previous commits:

Differences in a file between two previous commits can be compared.

1. First display the commit log for your file to obtain the commit ID, highlighted in yellow below
{{Commands{
[merantn@lab ~/bin]$ ''git log ncs205-lab35-q03.sh''
commit @@7690513d3642de5a83342dd16c85dcf506cdf95e@@
Author: Nick Merante <merantn@lab.ncs205.net>
Date:   Sun Mar 5 15:00:23 2023 -0500

    Adding missing shebang

commit @@14f729c85b6ea4156db49ccf69583f7822a951c5@@
Author: Nick Merante <merantn@lab.ncs205.net>
Date:   Sun Mar 5 14:44:25 2023 -0500

    add header

commit @@d7105d4ae4f04807b1fa9f30171677a396a26de8@@
Author: Nick Merante <merantn@lab.ncs205.net>
Date:   Sun Mar 5 14:35:53 2023 -0500

    first commit
}}}

2. Use the commit messages to identify the points in time you would like to compare, providing the two commit ~IDs and the filename to the next command.  Below, I can see the line that was removed and the lines which were then added.
{{{
[merantn@lab ~/bin]$ git diff d7105d4ae4f04807b1fa9f30171677a396a26de8 14f729c85b6ea4156db49ccf69583f7822a951c5 ncs205-lab35-q03.sh
diff --git a/ncs205-lab35-q03.sh b/ncs205-lab35-q03.sh
index 340c263..806d2a4 100755
--- a/ncs205-lab35-q03.sh
+++ b/ncs205-lab35-q03.sh
@@ -1 +1,9 @@
-echo fake demo script 3
+# File name: ncs205-lab35-q03.sh
+# Author: Nick Merante
+# Date Written: Mar 4, 2023
+# Assignment: Lab 35
+# Purpose: Calculate product of two integers
+# Description:
+#   - Prompt user for input of two integers
+#   - Calculate and display product
+#
}}}


!!! README files

The file {{File{README.md}}} can contain documentation for your project in [[Markdown format|https://www.markdownguide.org/basic-syntax/]].  This content will also be displayed in the git web UI for the files in that directory.

1. Edit the {{File{README.md}}} file and add a synopsis for each of your scripts, similar to my example below.  Notice the use of [[Markdown|https://www.markdownguide.org/basic-syntax/]] in the document.
{{{
[merantn@lab ~/bin]$ cat README.md
# NCS205 Scripts

## Lab 35
1. `ncs205-lab35-q01.sh` - Turn up to 9 words into uppercase
2. `ncs205-lab35-q02.sh` - Accept two integers as arguments and compute their product
3. `ncs205-lab35-q03.sh` - Prompt for two integers and compute their product
}}}

2. Commit your new {{File{README.md}}} file

3. Push the changes to the repository

4. Observe the new Readme in the web UI:

[img[img/gogs04.png]]


! Assignment

1. Be sure [[Lab 35|labs/lab35.pdf]] has been completed
2. Get familiar with {{Command{git}}} and our online repository
3. Check in your ''Lab 35'' scripts and a proper {{Monospace{''README''}}} file following the instructions above
4. Experiment with committing and rolling back changes.  We will use this more with future material

Also Complete [[Lab 36|labs/lab36.pdf]] after working with your repo.
! Material

!! Read:
* Chapter 33 - For Loops
* Chapter 30 - Troubleshooting
** This is optional, but may be helpful.  Especially the first five pages in the chapter.
!! Watch:
* [[Bash for-loops 1|https://www.youtube.com/watch?v=sIYmF32Ic8s]] & [[Bash for-loops 2|https://www.youtube.com/watch?v=HLFenK13VDY]]

! Notes

!! For loops

The bash for-loop is a control structure which allows us to iterate through items in a list.  The list can be populated statically (from strings you define directly) or as a result of any form of shell substitution (variable, file, or command).  Within the for loop, your commands will then be executed for each item in the list. 

Watch:  [[Bash for-loops 1|https://www.youtube.com/watch?v=sIYmF32Ic8s]] & [[Bash for-loops 2|https://www.youtube.com/watch?v=HLFenK13VDY]]

For Loop synopsis:
 - //list// is a list of items to iterate over
 - //variable// is a variable to store the current list item in as we run through the commands within the for loop.   
{{{
for variable in list
do
   commands
done
}}}

For example, this small script will iterate over a list of PNG files in the directory to change their extension from .png to .jpg
{{{
#!/bin/sh
# ./renpng.sh

# List the PNG files in the directory.  We will disable STDOUT and STDERR because we're only interested in the exit status.
ls *.png >&- 2>&-

# This is a short-circuited conditional to evaluate the exit status of the ls command.  Essentially, if there are no PNG files in the current
# directory, the exit status from the ls command will be non-zero.  This operates like a truth table, executing the commands in the list until one 
# returns a non-zero exit status.  If there are no files in the directory (exit status != 0), display an error message and exit the script. 
# This basic test ensures our script exits gracefully if there are no files
[ $? -ne 0 ] && echo Error: No PNG files && exit 1

# Iterate over each png file in the directory, saving the file to operate on in the variable $png
for png in *.png
do
	# The basename command removes an extension from a filename.  We're stripping off the png extension so we can add .jpg later
        filename=$(basename $png .png)
        mv $png ${filename}.jpg
        echo renaming $png to ${filename}.jpg
done
}}}


The {{Command{break}}} command executed within a loop will terminate it.   
The {{Command{continue}}} command will start the next iteration of a loop.
These are commonly used combined with if-statements to skip an item in the list or end the loop early.

For example, my script to collect your labs contains this at the top of the for-loop.  It will cause processing of a lab to stop if I already have it.

{{{
cd /opt/pub/ncs205/submit/
for file in ncs205-lab*.pdf
do
        hash=$(md5sum "$file" | cut -c 27-32)
        base=$(basename "$file" .pdf)
        [ -f $RSL_DIR/ncs205/collected/"$base"-v?-${hash}.pdf ] && continue

	# more processing commands follow
done
}}}


!! While loops

Here's a brief synopsis and example of a while loop.  We don't have time to cover them and none of our scripts will require them.  If-statements and for-loops are the two most useful to know.
 

While Loop:
{{{
while  condition
do
   commands
done
}}}

{{{
#!/bin/sh
# Create 100 png files

count=100
while [ $count -gt 0 ]
do
        string=$(printf "%04i" $count)
        dd if=/dev/urandom of=IMG_${string}.png bs=5k count=1 2>&-
        count=$(expr $count - 1)
done
}}}

The {{Command{break}}} command executed within a loop will terminate it.   
The {{Command{continue}}} command will start the next iteration of a loop.


!! Extra Commands:

* The {{Command{md5sum}}} command can be used to calculate file checksums
* The {{Command{stat}}} command can pull extra details about a file and allow for extra formatting options:
* The {{Command{date}}} command can display the current date and time in various formats.  Check its manpage for a list of format codes.

{{{
# Obtain a unix timestamp integer for the file's modification time:
[user@lab ~]$ stat -c '%Y' ncs205-lab10-browng.pdf
1478659277

# Convert a unix timestamp to a human-readable format:
[user@lab ~]$ date +"%m/%d/%y @ %H:%M" --date=@1478659277
11/05/16 @ 21:44

# Display the current date & time as a unix timestamp integer:
[user@lab ~]$ date +"%s"
1478659280   


# Use in a shell script with command substitution:
mtime=$(stat -c '%Y' $file)
mdate=$(date +"%m/%d/%y @ %H:%M" --date=@$mtime)

# Display them:
echo "$mtime / $mdate" 
1478659277 / 11/05/16 @ 21:44
}}}


!! Input Validation

We've demonstrated two ways to gather script input directly from the user:  as command line arguments and via the read command.

Basic error checking of user input is always preferred.  Never rely upon your users to enter input appropriately.  It is far better to catch any potential issues yourself instead of having the script run into a syntax error.  For example, if your script requires a command line argument but one is not provided, things may break down.  If you ask for a number, what will your program do if a user enters a string of letters instead?  Not properly validating input is also the basis for a lot of attacks, especially against web servers.

A basic test to ensure a command line argument was provided and exiting the script gracefully with an error message if it wasn't would be good.

Basic tests should be done any time you are gathering input from a user.  Some examples:

* Does a file actually exist?
* Is a username valid on the system?
* Is an IP address in the proper format?
* Is a number actually only composed of digits?

The test command can evaluate whether a file exists.  The id command can evaluate whether a user is valid on the system.  

The expr utility can be used to validate input in our scripts based on a regular expression.  File globbing is a way to concisely match a group of files based on wildcards.  Similarly, regular expressions are a concise way to represent strings using wildcards and operators.  Understanding and constructing regular expressions is beyond the scope of this course.  The ideas are similar to file globbing but the implementation is different.  If you're curious, regular expressions are discussed in the textbook in Chapter 19.  

We'll be using the {{Command{expr}}} utility to compare two strings.  Addition with {{Command{expr}}} is composed of two operands (our input) and an operator (the + symbol), eg:  expr 3 + 5

The {{Command{expr}}} utility can also be used for string comparisons.  When comparing strings with {{Command{expr}}}, the first operand is the string we are checking.  This can either be a static word or come from a variable.  The {{Command{expr}}} operator for string comparisons is a : (colon).  The second operand is a pattern resembling the string we are expecting.

For example, the following command performs a comparison between the string on the left and the pattern on the right:

{{{
expr 123 : '^[0-9]\{1,\}$'
}}}

The pattern {{Command{^[0-9]\{1,\}$}}} requires a string that contains 1 or more numbers. The anchors ^ and $ are used to specify that no other characters can be present in the string. The command above will return the number of characters matched by the pattern, in this case 3.

{{{
$ expr 123 : '^[0-9]\{1,\}$'
3
}}}


If I add something besides a number to my string, the pattern will not match so the character count will be 0

{{{
$ expr a123 : '^[0-9]\{1,\}$'
0
}}}

We can then evaluate the output of the expr command to determine whether we received the correct type of input. An example of this follows:

{{{
#!/bin/sh
# ./adder.sh

if [ $# -ne 2 ]
then
        # Display proper script usage and terminate the script with an exit status of 1 if two command-line arguments are not provided.  
        echo "Usage: $0 integer1 integer2"
        exit 1
fi

# Test to make sure our arguments only contain digits.  Notice the use of anchors.  
# A positive number will be saved to the variables if there is a match
match1=$( expr $1 : '^[0-9]\{1,\}$' )
match2=$( expr $2 : '^[0-9]\{1,\}$' )

# Make sure both matches contain positive numbers
if [ $match1 -gt 0 -a $match2 -gt 0 ]
then
        sum=$(expr $1 + $2)
        echo
        echo $1 + $2 = $sum
        echo
else
        echo "You did not enter 2 integers"
        echo "Usage: $0 integer1 integer2"
        exit 1
fi
}}}

! Assignment

Be sure to properly add and maintain your scripts in Git.  Lab 42 will check that this has been occurring.

* Complete [[Lab 37|labs/lab37.pdf]], [[Lab 38|labs/lab38.pdf]] - Due Saturday
* Complete [[Lab 39|labs/lab39.pdf]], [[Lab 40|labs/lab40.pdf]] - Due Wed, Mar 27
* Complete [[Lab 41|labs/lab41.pdf]], [[Lab 42|labs/lab42.pdf]] - Due Sat, Mar 30

* An extra credit scripting lab is available - [[Lab A4|labs/labA4.pdf]]
** Follow the normal shell scripting process to submit it.  Use the uppercase ''A'' instead of a normal lab number in the file name you submit.
** There is no due date for this lab.  
! Material

* Read Chapter 19 in [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]].

This material is a little more difficult to understand than most of what we'll be working on.  Make use of the discussion boards if you're running into trouble!  We can also spin up another online meeting if anyone would like to chat.

We only have enough time to skim the surface.  Regular expressions are very handy tools, especially when combined with our filters or when you need to perform complex text manipulations in vi.  I'd like to at least introduce the topic.

If anyone would like to dive deeper into this topic, I recommend [[Mastering Regular Expressions|https://www.oreilly.com/library/view/mastering-regular-expressions/0596528124/]].  It's an excellent technical book.


! Regular Expression Intro

* What is a regular expression?
** Compact way of specifying a pattern of characters
** text is evaluated against the pattern to determine if there is a match.
*** We can then perform actions based on that test
** We can use a regular expression to examine the contents of an entire file, such as with grep
** Or we can examine just a string (or variable), such as with input validation
* What are they used for?
** pattern matching - instead of searching for a simple word, search for more complex patterns
** Similar to how file globbing works.  Only here, we're examining the content of the files and using the metacharacters a little differently
** Complex substitutions (search & replace)
*** Helpful in vi or a Windows text editor such as notepad++
** Input verification
*** Make sure the user is providing an appropriate input.  For example, if we prompt for an IP address, ensure what's provided is a valid IP address.
* How are they used?
** {{Command{grep}}} - basic regular expressions 
** {{Command{egrep}}} or {{Command{grep -E}}} - extended regular expressions
** {{Command{sed}}}
** {{Command{vi}}}
** {{Command{perl}}}
** any other programming language

What to match:  a combination of ''atoms'' and ''operators''
''atom'' = something we're trying to match
''operator'' = how we're trying to match it
 
''Atoms'' - Any regular character, or:
| !Symbol | !Meaning | !Escape | !Not supported by |
| {{Monospaced{'' [ ] ''}}} |Character Classes (match any one character listed) | | |
|~|Characters may be specified singly or in ranges| | |
| {{Monospaced{'' [^ ] ''}}} |Negated character class (match any one character not listed)| | |
| {{Monospaced{'' . ''}}} |Any single character| | |

''Operators'' - Modify what they follow
These operators act as quantifiers, specifying the number of times an atom is matched.

| !Symbol | !Meaning | !Escape | !Not supported by |
| {{Monospaced{'' ? ''}}} |Optional item.  Match 0 or 1. | | sed |
| {{Monospaced{'' * ''}}} |Repetition: 0 or more| | |
| {{Monospaced{'' + ''}}} |Repetition:  1 or more. | | sed |
| {{Monospaced{'' { } ''}}} |Repetition: Defined range of matches {//min//,//max//} or {//min//,} or {//exactly//}| * | |

| !Symbol | !Meaning | !Escape | !Not supported by |
| {{Monospaced{'' ( ) ''}}} |Grouping - next operator applies to whole group| | |
|~|Alternation (match any one of the sub-expressions)| | |
| {{Monospaced{'' {{{|}}} ''}}} |Or.  Match either expression it separates.  Use with ( )| | |

Grouping with the ''( )'' allow us to apply an operator to a group of atoms.  For example, to make the entire string ''abcde'' optional:  ''(abcde)?''.  The ''?'' will apply to the entire sequence of characters within the ''( )''.

{{Note{''Note:''  We use file globbing to identify groups of files and regular expressions to identify groups of characters.  File globbing is for file //names// and regular expressions are used for the file's //contents// or any other strings of characters.  }}}
{{Warning{''Warning:'' Regular expressions use many of the same metacharacters as file globbing, but they work differently here.  For example, if we want to list files which begin with the letter a, I would use the command {{Command{ls a*}}}.  But with regular expressions, if I want to find a string that begins with an a, I need to use the regular expression {{Monospaced{ ''^a.*'' }}}.  In file globbing, the {{Monospaced{'' * ''}}} stands alone.  In regular expressions it is a modifier and changes the behavior of what comes before it.  With regular expressions, the {{Monospaced{'' . ''}}} is used to specify any single character and the {{Monospaced{'' * ''}}} modifies it to match 0 or more occurrences.}}}


''Anchors'' can also be used to specify where our match must occur:
| !Symbol | !Meaning |
| ^ |Start of line|
| $ |End of line|
| \< |Beginning of word|
| \> |End of word|


Typically, when we are searching for a pattern, we are searching for a sub-string (a smaller string within a larger series of characters).  For example, if I want to display lines which contain //only// numbers, the command {{Command{egrep '[1-9]+' data.txt}}} will return the output:

12345
a1234
b1234c

But I want lines which contain //only// numbers.  I don't want to display the lines which also contain a letter.  The solution to prevent other characters from sneaking into your output is to use the anchors to ensure your regular expression is ''//completely matched//''.  This command will anchor the numbers to the beginning and end of the line so no other characters can be matched:  {{Command{egrep '^[1-9]+$' data.txt}}} 
 



[[Regular expression metacharacters]]
[[ASCII Chart|handouts/ascii-chart.gif]]

Regular expressions and file globbing have common metacharacters that have different meanings in the two contexts.  ''Be sure you know how the metacharacters differ.''


!! Examples

!!!! Match:
* grey with either spelling - {{Monospaced{'' gr[ea]y ''}}}
* color with American or British spelling - {{Monospaced{'' colou?r ''}}}
* variations of third street (3rd street or third street) - {{Monospaced{'' (3|thi)rd street''}}}
* The month June either full or abbreviated - {{Monospaced{'' June? ''}}}

* find words with 5 or more vowels in a row - {{Command{'' egrep '[aeiou]{5}' /usr/share/dict/words ''}}}

* Find words in the unix dictionary file that begin with book and end with y - {{Command{ grep '^book.*y$' /usr/share/dict/words }}} - Don't forget the anchors!
* Find words that are 4 characters beginning with a b and ending with a k - {{Command{ grep '^b..k$' /usr/share/dict/words }}}
* Find words in the dictionary file that begin with or end with the string book - {{Command{ egrep '(^book|book$)' /usr/share/dict/words }}}
* Match Kunsela ~C-Wing B&W printer queue names - {{Command{ grep '^c...lpr' /opt/pub/ncs205/data/filter_examples/printcap | cut -d '|' -f 1}}}
* Extract the mail sender and subject for the last 4 emails received - {{Command{egrep "^(From|Subject): " /var/mail/$USER | tail -n 8}}}

!!!! Input verification:
Completely match -
* a valid campus username - {{Monospaced{'' ^[a-z]{1,8}([0-9]{1,2})?$ ''}}} - 1 to 8 lowercase letters optionally followed by 1 or 2 digits.
* a phone number - {{Monospaced{'' '^[0-9]{3}[-. ][0-9]{3}[-. ][0-9]{4}$' ''}}}
* time (12hr clock) -  {{Monospaced{'' '^([1-9]|1[0-2]):[0-5][0-9] [ap]m$' ''}}}
* a Dollar amount 

What is the difference between [ab]* and (a*|b*)?


!!! More complex examples:

!!!! Search the ssh configuration files for all configuration directives that are enabled with a yes:  

Read this from bottom to top
{{{
egrep '^[^#]+ yes' /etc/ssh/ssh{,d}_config
       ^  ^ ^^---              ----
       |  | || ^                 ^--- Match both ssh_config and sshd_config
       |  | || |--------------------- Followed by the string yes
       |  | ||----------------------- There must be a space between the configuration item and its value
       |  | |------------------------ The + modifies the [^#] to allow for any length of characters here.
       |  |-------------------------- Followed by any character which is not a #.  We don't want comments.
       |----------------------------- The line begins 
}}}
This part is the regular expression that searches for the text:  {{Monospaced{'' ^[^#]+ yes ''}}}

This part is the file globbing pattern that identifies the files to search:   {{Monospaced{''/etc/ssh/ssh{,d}_config''}}}


!!!! Search all networking configuration files on the class shell server for an IP address:

{{Command{egrep '((1?[0-9]?[0-9]|2[0-4][0-9]|25[0-5])\.){3}(1?[0-9]?[0-9]|2[0-4][0-9]|25[0-5])' /etc/sysconfig/network-scripts/ifcfg-*}}}

It's important to understand how the metacharacters differ between file globbing and regular expressions.  In regular expressions, the {{Monospaced{'' ? ''}}} or {{Monospaced{'' * ''}}} modify the atom immediately before them.  For example, in the IP address above, the {{Monospaced{'' ? ''}}} in {{Monospaced{'' 1?[0-9]?[0-9] ''}}} means both the {{Monospaced{'' 1 ''}}} and the {{Monospaced{'' [0-9] ''}}} range are optional.  In file globbing, the {{Monospaced{'' ? ''}}} or {{Monospaced{'' * ''}}} stand alone and represent either one single character or 0 or more characters by themselves.

The regular expression to identify an IP address is rather complex.  We can't just do {{Monospaced{'' [0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3} ''}}}  if we're only trying to match IP addresses and call it a day.  This one would work to identify a proper IP like 192.168.0.1 but would also match an invalid IP address like 555.555.555.555.


To break this regex down piece by piece:
{{{
((1?[0-9]?[0-9]|2[0-4][0-9]|25[0-5])\.){3}(1?[0-9]?[0-9]|2[0-4][0-9]|25[0-5])
 ^ ^     ^  ^   ----------- -------^ ^  ^  ---------------------------------
 | |     |  |        ^         ^   | |  |                ^-- Repeat the first group again to identify a single octet without the dot at the end.
 | |     |  |        |         |   | |  |--- Repeat the previous group within the ( ) three times for the first three octets 
 |---------------------------------|-------- These ( ) are used for alternation with the |, eg: (This|or|that) will match one of those three words.
   |     |  |        |         |     |------ Escape the dot so it's actually just a dot and cannot represent any single character
   |     |  |        |         |------------ Match an octet from 250-255
   |     |  |        |---------------------- Match an octet from 200-249
   |     |  |------------------------------- Notice the lack of the ?.  We must have at least a single digit in each octet
   |     |---------------------------------- The ? makes the [0-9] optional.  This would allow for a two digit IP address
   |---------------------------------------- This optional 1 allows for octets in the 100-199 range
}}}


! Assignment

* Complete labs [[Lab B1|labs/labB1.pdf]], [[Lab B2|labs/labB2.pdf]], [[Lab B3|labs/labB3.pdf]], & [[Lab B4|labs/labB4.pdf]]
** These all are extra credit labs.
** Be sure to use an uppercase {{Monospaced{''B''}}} as part of the lab number
** We haven't done much with regular expressions and this is a complex topic.  Please make use of the discussion boards if you have any questions or run into any trouble.
! Material

!! Read:
* Chapter 11 in [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]]
* Chapter 26 in [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]]
** The goal from Ch 26 is to get an understanding for functions.  The whole thing can be summed up by the example on page 385 and the contents of the //Shell Functions In Your .bashrc File// box on page 391.

!! Watch:
* [[Customizing Your Terminal: .bash_profile and .bashrc files|https://www.youtube.com/watch?v=vDOVEDl2z84]]
* [[Customizing Your Terminal: Adding Color and Information to Your Prompt|https://www.youtube.com/watch?v=LXgXV7YmSiU]]
* [[Creating Aliases for Commands|https://www.youtube.com/watch?v=0liXeoADU6A]]


! Notes

!! Working with the shell

A user automatically executes a shell when they log into a Unix system.  A shell is a special type of program that receives and interprets commands.  This shell is what users interact with on the command line.  You are then logged out of the system when your shell terminates.

The shell you are using is specified as the last column in the {{File{/etc/passwd}}} file.  Bash is the standard default, but many others exist.

The different shells available on the system are usually listed in the file {{File{/etc/shells}}}.
* {{Command{/sbin/nologin}}} : A special shell used for disabled accounts which should not be able to log in.
** You'll see service accounts in {{File{/etc/passwd}}} with this shell.  Users should never be able to log into those service accounts
** A service account with a valid shell is a major red flag and a sign your system has been tampered with.


!!! Bash Customization

RTFM!  {{Command{man bash}}}

!!!! Shell initialization and configuration scripts - executed upon login
Your shell environment can be customized by the system administrator and by the user.  The sysadmin may have some site-specific changes to make.  For example, I change the default umask for everyone on our class shell server.  Each user may customize their shell environment either cosmetically, such as by changing the shell prompt, or functionally, such as by changing the PATH or adding command aliases.

The shell environment is customized through a series of script files.  They are executed in the following order so the admin can set defaults that the users can override with their own customizations.  These scripts work like any of the scripts we've been writing for this class.  Any commands entered will be executed when these scripts run at login or logout.

Interactive login shell execution sequence.  When you first log in to the system, the following are executed (if they exist):
* {{File{/etc/profile}}} contains general system defaults 
* All scripts in the directory {{File{/etc/profile.d/}}}
** Putting individual settings in their own files makes it easier to maintain the changes
** The file {{File{/etc/profile.d/umask.sh}}} sets our default umask
* {{File{~/.bash_profile}}} is controlled by the user for their custom changes
** Put things you want to run during a new login session in this file.  Items in this file will not be executed if a new shell instance is executed.
* {{File{~/.bashrc}}} is executed only in interactive shells.  This file may contain extra functions and aliases.
** Put settings (like aliases and prompt changes) in this file so they will be activated if a new shell session is run
* {{File{~/.profile}}} may exist instead of {{File{~/.bash_profile}}} on some systems
* //User disconnects//
* {{File{~/.bash_logout}}} will execute when the user logs out.

{{Command{ source //file// }}} (or {{Command{ . //file// }}})
<<<
Read and execute commands from //file// in the current shell environment.  Apply changes within an environment script file to the current login shell.
<<<

!!!! Example:
Suppose each time a user logs in, we want to display their last three logins to the screen.  The following would be added to either the site-wide {{File{/etc/profile.d/}}} directory or appended to their {{File{~/.bash_profile}}}.  We would choose {{File{/etc/profile.d/}}} if we didn't want the users to be able to remove it.  We would choose the user's {{File{~/.bash_profile}}} if we wanted users to be able to override it.  We would not put it in {{File{~/.bashrc}}} because we only want this information displayed when the users log in, not when they just run a new shell.

{{{
last -3 $USER
}}}


!!!! Default user dotfiles
The directory {{File{/etc/skel}}} contains default copies of  {{File{~/.bash_profile}}}, {{File{~/.bashrc}}}, and {{File{~/.bash_logout}}}.  These can be copied to the home directories of new users so they have defaults available for their accounts.


!!!! Other shell configuration files:
Readline library - A library for reading a line of input from the terminal 
* Configured by {{File{/etc/inputrc}}} and {{File{~/.inputrc}}}
* These files mostly control additional key bindings
* I like to enable the ~PageUp and ~PageDown keys on other systems for fast command recall.  They're not enabled by default on Debian.

{{File{/etc/~DIR_COLORS}}}
<<<
Configure directory listing colorization
<<<
Disable ls colorization on the ~VMs.  Sometimes color makes it hard to read the text
* Edit {{File{/etc/~DIR_COLORS}}}
* change {{Monospaced{ ''tty'' }}} to {{Monospaced{ ''none'' }}}

{{File{/etc/motd}}} - A ''m''essage ''o''f ''t''he ''d''ay to display to users after they log in


!!! Aliases

Command aliases provide a way to execute long command strings with fewer keystrokes.  Additional options and arguments can be added to an alias.  For example, running {{Command{ l. -l }}} will display all files which begin with a dot in long-listing format.  The {{Command{ l. }}} alias will be translated to {{Command{ ls -d .* }}} and then the {{Monospaced{ -l }}} option will be added.

Display currently defined aliases:  {{Command{alias}}}

Set an alias:  {{Command{alias name='long_cmd -abcd | next_command -efgh'}}}

Standard aliases
* {{Command{ll}}} - Aliased to {{Command{ ls -l }}} on most systems
* {{Command{l.}}} - Aliased to {{Command{ ls -d .* }}} - Display //only// files which begin with a dot.

Override aliases:
* The {{Command{ rm }}} command is usually aliased to {{Command{ rm -i }}} on most systems so you are prompted before deleting each file.  
* Prefix your command with a \ (backslash) to suppress this alias expansion and execute {{Command{ rm }}} normally:  {{Command{ \rm foo }}}

Remove an alias for the current login session:  {{Command{unalias //alias//}}}

{{Command{which}}} and {{Command{type}}}
* These commands will display how each argument would be interpreted if executed as a command
* Aliases will be translated to their actual commands so you know what is really being executed


!!! Core shell options:

Stored in the {{Monospaced{$SHELLOPTS}}} variable
Manipulated with the set command

Enable a shell option:  {{Command{set -o //option//}}}
Disable a shell option:  {{Command{set +o //option//}}}

Examples: 

Toggle command line input method between vi and emacs:
{{Command{set -o vi}}}
{{Command{set -o emacs}}}

Enable noclobber:
With noclobber enabled, an existing file will not be overwritten by redirecting STDOUT to a file 
{{Command{set -o noclobber}}}
{{Command{set +o noclobber}}}

!!! Extra shell options:

{{Command{shopt}}} - Display a list of available options
 ''-s'' to enable an option
 ''-u'' to disable an option

Examples:
* {{Command{ shopt -s cdspell }}} - minor errors in the spelling of a directory component in a cd command are corrected.
* {{Command{ shopt -s checkjobs }}} - lists the status of any stopped and running jobs before exiting an interactive shell.


!!! Environment & Shell variables

In bash, variables are defined on the command line with this syntax:  {{Command{variable=value}}}
By default all variables are local and will not be inherited by child processes

The {{Command{export}}} command will make a variable global and accessible to any child process
{{Command{export}}} can be used when defining a global variable.  eg:  {{Command{export foo=bar}}}
Or, can be used to elevate a currently defined variable to global.  eg:  {{Command{foo=bar ; export foo}}}

{{Command{set}}} will display all currently set variables

{{Command{unset}}} can be used to unset a variable


The shell environment can be manipulated through variables:

For example, the {{Monospaced{$PATH}}} and the prompt variable, {{Monospaced{$~PS1}}}:

The prompt:

* ~PS1 - Primary prompt string is stored in this variable
* Other secondary PS variables exist.
** See https://ss64.com/bash/syntax-prompt.html for more details.

Display your current prompt string: {{Command{ echo $~PS1 }}}

The last character in your prompt - {{Monospaced{ ''#'' }}} vs {{Monospaced{ ''$'' }}}
* {{Monospaced{ ''$'' }}} at the end of the prompt means the user is a regular, unprivileged user.
* {{Monospaced{ ''#'' }}} at the end of the prompt means the user is a superuser.  
* This tagging makes it easier to see your privilege level.

Customized prompts I like for this class.  This prompt makes it easier to see the full path to the current directory and show long command strings on the projector.  The second version adds color.
{{{
PS1='\n[\u@\h \w] :\n\$ '
PS1='\n[\e[1;31m\u\e[m@\e[1;33m\h\e[m \w] :\n\$ '
}}}
Changing the ~PS1 variable by running one of the above commands applies the change immediately to your login session.  It will be reset when a new shell executes.  Add the change to your {{File{~/.bashrc}}} to make it permanent.


!!! Functions:

Functions can provide a shortcut to more complicated command sequences.  They can be used in shell scripts or directly from the command line.

Append to your {{File{~/.bashrc}}}:
{{{
function bak() {
        # This function creates a backup in the current working directory of any single file passed as an argument.
        # Example: bak test.sh
        cp "$@" "$@".`date +%y%m%d:%H%M`.bak
}
}}}

After adding this function to your {{File{~/.bashrc}}}, activate the new version by running  {{Command{ . ~/.bashrc}}} or reloading the shell.


!!! History substitution:

* Your command history is saved in a buffer for the current login session
* By default, the buffer is appended to {{File{~/.bash_history}}} upon logout
* You can then display the current session's history buffer with the {{Command{history}}} command.

There are history configuration variables to change this behavior:
  - {{Command{set | grep HIST}}}
 

! Assignment

* An extra credit environment / scripting lab is available - [[Lab C1|labs/labC1.pdf]]
** Be sure to use an uppercase {{Monospaced{''C''}}} as part of the lab number
** There is no firm due date for this lab.  Please try to have it in by the end of November
! Material

!! Reading:
* Linux Administration, Chapter 14 - Linux Firewall

There's a lot of good networking information in this chapter:
* A NAT Primer on pages 301-302 is important to know in a general sense, but not necessary for this class.
* The ~NetFilter background information throughout the chapter is good to know, but the {{Command{ iptables }}} command for managing the firewall has largely been replaced by the {{Monospaced{firewalld}}} tool, {{Command{ firewall-cmd }}}
* The flow chart on the top of page 304 is important to understand
* Pay particular attention to the firewalld section from 317-319.

! Notes

Effective security requires a [[multi-layered approach|https://www.techrepublic.com/blog/it-security/understanding-layered-security-and-defense-in-depth/]], [[defense in depth|https://www.us-cert.gov/bsi/articles/knowledge/principles/defense-in-depth]], and adherence to [[principle of least privilege|https://www.us-cert.gov/bsi/articles/knowledge/principles/least-privilege]].  Ideally, a weakness or vulnerability uncovered in one layer will be mitigated by another security layer.  

Five good examples of this we have deployed:
* {{Monospaced{ntpd}}} and {{Monospaced{named}}} on your core VM have ~ACLs in place to limit who can communicate with those services
* Direct login with the root account is now blocked.  Only authorized user accounts can elevate privileges via {{Command{su}}} or {{Command{sudo}}} through membership to the wheel group.  Just having the root password isn't enough.
* Rather than give a webmaster full root access, we configured {{Command{sudo}}} to provide elevated privileges just to the functions he may need, adhering to the principle of least privilege
* By implementing two-factor authentication, we have a second layer of security to protect us in case credentials are stolen.  
* SSH access to our class shell server cannot be blocked by a firewall.  We all need to be able to connect from anywhere.  The server is under constant brute-force login attack from all over the internet.  The [[fail2ban|https://www.fail2ban.org/]] tool was deployed on the class shell server to automatically detect and block those attacks.

Host-based firewalls are another important layer in the security of a system.  Perimeter firewalls are important, but what happens if an attacker is already within your walls?  Restricting access to open ports on your systems to only those needing to communicate with them is a good way to further limit your exposure to attack.

Systems must always be built with the assumption that outer layers have been or will be breached.  For example, 
* Assume your password will be stolen at some point.  Use two-factor authentication everywhere you can.
* Assume the perimeter firewall will be breached at some point.  Deploy a host-based firewall on your servers.
** Assume the host-based firewall may be accidentally disabled or bypassed.  Configure your services to properly use ~ACLs.
* Assume a user will have a weak password that could be brute-forced.  Deploy a system to detect and block brute-force login attempts.


!! ~SaltStack Example

[[SaltStack|https://www.saltstack.com]] is an open-source platform for server automation and remote task execution.  It's very powerful, easy to deploy, and easy to use.  We're using it in our virtual lab to facilitate the management and monitoring of the class ~VMs.  

~SaltStack consists of:
* A master server which serves as the central control hub to issue commands and push configuration changes
* Minions, which are the nodes connecting to and being managed by the master.

Your ~VMs are all Salt minions connecting to a master server I control.  This enables me to quickly and easily push configuration changes and review the state of your ~VMs in bulk.  Instead of having to connect to each system to fix or review something, I can issue a single command which will run on all of them.

Early last year two [[highly critical vulnerabilities|https://labs.f-secure.com/advisories/saltstack-authorization-bypass/]] ([[CVE-2020-11651|https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-11651]] and [[CVE-2020-11652|https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-11652]]) were disclosed impacting the ~SaltStack master server.  These vulnerabilities allow any attacker who can communicate with the ~SaltStack network ports on the master to completely take it over by sending control messages, read and write files to the master, and steal its secret keys.  The attacker will thus have complete control of the master server and all minions connecting to it.  These vulnerabilities will then allow for a complete compromise of all systems within a ~SaltStack deployment.

The [[Common Vulnerabilities and Exposures (CVE)|https://en.wikipedia.org/wiki/Common_Vulnerabilities_and_Exposures]] database managed by [[Mitre|https://cve.mitre.org/]] contains a list of all publicly disclosed security vulnerabilities. The ~CVEs are assigned a [[CVSS score|https://nvd.nist.gov/vuln-metrics/cvss]] ranging from 0 (benign) to 10 (critical) to rate their severity.  A CVSS score of 10 generally means full system compromise can be remotely accomplished.  These are "drop what you're doing and fix this now" vulnerabilities.  

Both ~SaltStack ~CVEs were assigned a CVSS score of 10.  ~F-Secure, the company which discovered the weaknesses, posted in their blog:  “Patch by Friday or compromised by Monday”.

This is a great example of the need for multiple layers of security.  There will always be time gaps between when vulnerabilities are introduced in software, when they are discovered, when patches are available, and when those patches can be applied.  Those time delays are occasionally significant:
* 2 years for [[CVE-2014-0160|https://www.cvedetails.com/cve-details.php?t=1&cve_id=CVE-2014-0160]], also known as [[HeartBleed|https://heartbleed.com/]], which allowed the compromise of a web server's SSL secret keys and the decryption of ~SSL-encrypted network communication.
* 20 years for [[CVE-2020-0601|https://nvd.nist.gov/vuln/detail/CVE-2020-0601]] with a [[8.1|https://nvd.nist.gov/vuln-metrics/cvss/v3-calculator?name=CVE-2020-0601&vector=AV:N/AC:L/PR:N/UI:R/S:U/C:H/I:H/A:N&version=3.1&source=NIST]] rating ([[details|https://krebsonsecurity.com/2020/01/cryptic-rumblings-ahead-of-first-2020-patch-tuesday/]]) involving a core cryptographic library in Windows.

~F-Secure, in their [[blog post|https://blog.f-secure.com/new-vulnerabilities-make-exposed-salt-hosts-easy-targets/]], identified 6000 vulnerable ~SaltStack master servers through scans of the entire Internet.  Use of a firewall to prevent anyone on the internet from communicating with these servers would have been the first step in protecting them from abuse by the entire world and is especially vital now that vulnerabilities have been discovered.

Luckily, the ~SaltStack master used for our class is protected from the entire Internet by the perimeter firewall.  But what if an attacker is already on our network, either physically or virtually.  What if a misconfiguration of the perimeter firewall allows traffic to our master?  A host-based firewall must be deployed to protect this system by only allowing our class ~VMs to communicate with the Salt master.


!! Packet filter firewalls

Another component of system security which allows us to:
* Filter unwanted network traffic
* Log & monitor network traffic
* Block brute force attacks
* Rate limit to counter minor ~DoS events

Filter minimally based on source or destination address, ports, or protocol types
We can either default to deny or default to allow
Optional logging
 - logging is useful for regular monitoring and debugging

Ingress or Egress filtering
 - Control both what flows in and out of the system.
 - For example, filtering egress from a web server could effectively block [[reverse shell|https://www.acunetix.com/blog/web-security-zone/what-is-reverse-shell/]] attacks


!! Developing firewall rules
* We must first understand what communication is expected to take place
** Know the source and destination of the network traffic
*** Is it a new connection or related to an existing connection ?
** Match services to port numbers
*** Consult {{File{/etc/services}}} for a mapping of port numbers to service names.
*** Low ports 0-1023 are well-known ports and privileged.  They may only be bound by a root-controlled process.
*** Ports 1024-49151 are [[registered|https://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml]].  Port registration is issued by IANA.
*** Ports 49152-65535 are dynamic ports and generally not listening for services.  These are often used for replies and established connections.
** Observe what is currently listening for connections:
*** {{Command{ss}}} - Socket Statistics (the new tool). Installed by default
**** {{Command{ss -tunlp}}} - Display which ports are currently listening for connections.  The -n option will display IP addresses and port numbers instead of their names.  
**** {{Command{ss -tulp}}} - Display which ports are currently listening for connections.  Omitting the -n option will display host and service names instead of their numbers. 
*** {{Command{netstat}}} - Print network connections (the old tool).  This command requires the ''net-tools'' package.
**** {{Command{netstat -tunlp}}} - Display which ports are currently listening for connections.  The -n option will display IP addresses and port numbers instead of their names.  
**** {{Command{netstat -tulp}}} - Display which ports are currently listening for connections.  Omitting the -n option will display host and service names instead of their numbers. 
*** {{Command{lsof -i -n -P}}} - ''l''i''s''t ''o''pen ''f''iles.  Displays files and ports in use along with the processes which are utilizing them.  Requires the ''lsof'' package.
** Use a tool like [[nmap|https://nmap.org/book/man.html]] to scan a remote system
*** Scanning a remote system with nmap can reveal what services are listening or how effective your firewall is
** Some services listen on both TCP and UDP.  Be sure to take note of the protocol being used.
* Once you know what should be listening, then create rules to allow desirable traffic
** Determine if any traffic should be blocked
** Decide what to do with remaining ports
*** Allow 
*** Allow but log
*** Deny

!!! Examples

Display listening ports on our class lab Proxmox server.  The third column shows the IP address the service is bound to.  The IP address of 0.0.0.0 means all IP addresses.  The IP address 127.0.0.1 means localhost, or the internal system.
{{{
root@lab ~ # netstat -tunpl
Active Internet connections (servers and established)
Proto Recv-Q Send-Q Local Address           Foreign Address         State       PID/Program name
tcp        0      0 0.0.0.0:2222            0.0.0.0:*               LISTEN      789/sshd: /usr/sbin
tcp        0      0 0.0.0.0:111             0.0.0.0:*               LISTEN      1/init
tcp        0      0 0.0.0.0:80              0.0.0.0:*               LISTEN      8673/nginx: master
tcp        0      0 127.0.0.1:85            0.0.0.0:*               LISTEN      1669/pvedaemon
tcp        0      0 127.0.0.1:25            0.0.0.0:*               LISTEN      1633/master
tcp        0      0 0.0.0.0:443             0.0.0.0:*               LISTEN      8673/nginx: master
tcp        0      0 0.0.0.0:3128            0.0.0.0:*               LISTEN      1684/spiceproxy
udp        0      0 0.0.0.0:111             0.0.0.0:*                           1/init
}}}

{{Note{''Note:'' Examine your output closely.  Not all ports need to be opened in the firewall.  In the output above, we can see ports 25 & 85 are bound to 127.0.0.1, the loopback address.  These two ports can only be connected to from within the system and should not receive firewall rules.  Also think about what services on your server should be receiving external connections.  Not all should be receiving connections from external systems, and those that should not must not have firewall rules created for them.}}}


Perform a port scan of the class lab Proxmox server from an external host.
{{{
nick@trillian:~>nmap lab.ncs205.net
Starting Nmap 7.70 ( https://nmap.org ) at 2022-04-12 23:19 EDT
Nmap scan report for lab.ncs205.net (94.130.36.42)
Host is up (0.12s latency).
Not shown: 892 filtered ports, 103 closed ports
PORT     STATE SERVICE
22/tcp   open  ssh
53/tcp   open  domain
80/tcp   open  http
443/tcp  open  https
3128/tcp open  squid-http

Nmap done: 1 IP address (1 host up) scanned in 84.21 seconds
}}}

Notice the difference between the two reports?  
* Ports listed in the first report but not the second are being blocked at the external firewall
* Ports listed in the second report but not the first are being forwarded by the Proxmox server to internal ~VMs


!!Stateful inspection - State module

* Stateless - inspect each packet in isolation.  Examine source and destination hosts and ports then decide what to do.
* Stateful - maintain the state of network connections.  These states can be used to determine policy.
** For example, allow traffic that is already part of an established connection (TCP) or is a reply to a previous request (UDP)

Inspect the traffic to allow expected replies
* -m state &nbsp; &#45;-state //state//
* State is a comma delimited list of:
** NEW - the packet has started a new connection
** ESTABLISHED - the packet is associated with a connection which has seen packets in both directions
** RELATED - the packet is starting a new connection, but is associated with an existing connection
** INVALID - the packet  could not be identified for some reason (typically some type of error)

Going into detail on these networking concepts is beyond the scope of this class.  But they are important to understand.


TCP - already stateful:
* 3-way handshake
* Evaluate TCP flags to determine state
[img[img/state-tcp-connection.jpg][http://www.iptables.info/en/connection-state.html]]

UDP - stateless
* no flags to evaluate
* kernel tracks outbound UDP packets.  Responses to outstanding requests are marked
[img[img/state-udp-connection.jpg][http://www.iptables.info/en/connection-state.html]]


!! Tool Overview
* netfilter - What's running under the hood
** This is manipulated with the deprecated {{Command{iptables}}} command.
* firewalld - A new front-end to simplify managing the firewall
** This is manipulated with the new {{Command{firewall-cmd}}} command.  This is the standard tool to use in ~CentOS 7.

The new ~FirewallD and its set of tools makes the management of a basic firewall very easy.  You no longer need to know intricacies of how ~NetFilter works and how its rules are created.  ~FirewallD will take care of that for you.  It's good info to know and is included here for reference.  You can skip the //Linux ~NetFilter// section if you'd like and continue with the ~FirewallD section down below.  We'll be using {{Command{ firewall-cmd }}} to manage our firewall instead of {{Command{ iptables}}}, the old command.

----

!!Linux [[netfilter|http://www.netfilter.org/]]
* controlled by the {{Command{ iptables }}} command

!!! Table:
* Sets of chains 
* Default table is named filter
* Additional tables:
** NAT table
** Mangle table - for specialized packet alteration (~QoS)
** Raw table - for configuration exemptions

!!! Chains of rules:
* Firewall rules are grouped into chains
* Rules within a chain are interpreted in order, top to bottom
** Until a match is found
** Or the default target is reached (ACCEPT or REJECT)
* Default chains:
** INPUT: traffic addressed to the system
** OUTPUT: traffic leaving the system
** FORWARD: all packets arriving on one network interface and leaving another
* Custom chains can be created for organizing similar rules

!!! Rules:
* Rules contain a criteria and a target
* The criteria is based on attributes of the packet, such as IP addresses or ports.
* If the criteria is matched, either perform the specified action or continue rule processing within the target
* If the criteria is not matched, move on to the next rule.
* Terminate with the chain's default target

!!! Targets:
Each rule contains a target clause to determine what to do with matched packets:
* ACCEPT - allow the packet to proceed
* DROP - silently reject the packet (causes TCP retries)
* REJECT - reject the packet with an ICMP error message
* LOG - track the packet as it matches a rule
* REDIRECT - redirect packets towards a proxy
* RETURN - terminate user-defined chains
* QUEUE - transfer packets to local user programs via a kernel module
* A custom chain may be specified as a target.  Rules in that chain will be evaluated.


!!! iptables Commands

iptables -h

!!!! Saving your rules
iptables-save > /tmp/iptables.rules
iptables-restore < /tmp/iptables.rules

service iptables save
rules are stored in /etc/sysconfig/iptables


!!!! Firewall Operations:
| !Option | !Definition |
|-L [&#45;-line-numbers] [-v] |List all rules|
|-I //chain-name// //position-number//  //rule// |Insert rule into a chain|
|-A //chain-name//  -i //interface//  -j  //target// |Append the current target to the chain|
|-D //chain-name// //position-number// |Delete a rule from a chain|
|-P //chain-name//  //target// |Sets default policy for the chain|
|-F //chain-name// |Flush all rules in a chain|
|-N //chain-name// |Create a new chain|

!!!! Filter criteria command line options:
| !Option | !Definition |
| -p proto |Match by protocol: tcp, udp, or icmp|
| -s source-ip |Match host or network source IP address|
| -d dest-ip |Match host or network destination address|
| &#45;-sport port# |Match by source port|
| &#45;-dport port# |Match by destination port|
| &#45;-icmp-type type |Match by ICMP type code|
| -i int |Match by interface|
| &#33; |Negate a clause|
| -t table |Specify the table to which a command applies (default is filter)|
| -j //target// |Specify target to use|

!!!! Extensions:
| -m state &nbsp; &#45;-state //state// |filter based on specified //state//|
| -m multiport &#45;-dports //port1//,//port2//,...//portN// |filter multiple ports|
| -m owner &#45;-uid-owner //uid// |filter based on user name|


!!! Examples:

{{{

iptables -L --line-numbers -v

# Allow established traffic:
iptables -A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT


iptables -A INPUT -s 10.103.35.0/24 -p tcp --dport 4444 -j ACCEPT
iptables -A INPUT -p tcp -m multiport --dports 20,21 -j ACCEPT

iptables -A OUTPUT -p tcp -m multiport --dports 20,21 -j REJECT


# SSH chain:
iptables -N SSH
iptables -A SSH -p tcp --dport 22 -j ACCEPT -s 10.103.36.24/29
iptables -A SSH -p tcp --dport 22 -j ACCEPT -s 10.103.36.10
iptables -A SSH -p tcp --dport 22 -j ACCEPT -s 150.156.192.0/24
iptables -A SSH -p tcp --dport 22 -j ACCEPT -s 10.156.195.0/24
iptables -A SSH -p tcp --dport 22 -j ACCEPT -s 150.156.193.20
iptables -A SSH -p tcp --dport 22 -j LOG -m limit --limit 1/sec --log-prefix "IPTables-SSH: " --log-level 4

iptables -I INPUT 6 -j SSH -m state --state NEW
}}}

----

!! [[FirewallD|https://firewalld.org/]]

~FirewallD is the new way to manage Linux firewalls.  Everything above in the ~NetFilter section is still running under the hood, but ~FirewallD provides a nice front-end to manage things.

Most of the following information is directly from the [[RedHat Linux Firewalls|https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/7/html/security_guide/sec-using_firewalls]] documentation.

The Firewall Stack:
[img[img/firewall-stack.png]]


!!! Zones
firewalld can be used to separate networks into different zones according to the level of trust that the user has decided to place on the interfaces and traffic within that network. A connection can only be part of one zone, but a zone can be used for many network connections. 
!!!! Available zones:
* ''block'':  Any incoming network connections are rejected with an icmp-host-prohibited message for ~IPv4 and icmp6-adm-prohibited for ~IPv6. Only network connections initiated from within the system are possible. 
* ''dmz'': For computers in your demilitarized zone that are publicly-accessible with limited access to your internal network. Only selected incoming connections are accepted. 
* ''drop'': Any incoming network packets are dropped without any notification. Only outgoing network connections are possible. 
* ''external'': For use on external networks with masquerading enabled, especially for routers. You do not trust the other computers on the network to not harm your computer. Only selected incoming connections are accepted. 
* ''home'': For use at home when you mostly trust the other computers on the network. Only selected incoming connections are accepted. 
* ''internal'': For use on internal networks when you mostly trust the other computers on the network. Only selected incoming connections are accepted. 
* ''public'' (default): For use in public areas where you do not trust other computers on the network. Only selected incoming connections are accepted. 
* ''trusted'': All network connections are accepted. 
* ''work'': For use at work where you mostly trust the other computers on the network. Only selected incoming connections are accepted. 


!!! Runtime vs. Permanent Settings

There are two firewall configurations:  
* The runtime settings define the firewall rules currently in effect 
* The permanent settings reflect the stored configuration that will be reloaded if the firewalld service restarts.

Any changes applied to the running firewall only apply while firewalld is running. When firewalld is restarted or the system reboots, the settings revert to their permanent values.

To make firewall changes persistent across reboots, rules need to be saved in both locations.  This can be accomplished two different ways:

Modify the runtime configuration first:
# Add a new rule to the runtime configuration:  {{Command{ firewall-cmd &#45;-add-service=ssh }}}
# Test your rule and system.  Make sure nothing is broken.
# Make your changes permanent if everything works:  {{Command{ firewall-cmd &#45;-runtime-to-permanent }}}
# Or, discard your runtime changes and reload the permanent configuration if there is a problem {{Command{firewall-cmd &#45;-reload }}}

Modify the permanent configuration first:
# Add a new rule to the permanent configuration:  {{Command{firewall-cmd &#45;-permanent &#45;-add-service=ssh}}}
# Reload the permanent configuration {{Command{ firewall-cmd &#45;-reload }}}


!!! Predefined Services

A service can be a list of local ports, protocols, source ports, and destinations, as well as a list of firewall helper modules automatically loaded if a service is enabled. Using services saves users time because they can achieve several tasks, such as opening ports, defining protocols, enabling packet forwarding and more, in a single step, rather than setting up everything one after another. 

Service configuration options and generic file information are described in the firewalld.service(5) man page. The services are specified by means of individual XML configuration files located in {{File{/usr/lib/firewalld/services/}}} which are named in the following format: //service-name//.xml. Protocol names are preferred over service or application names in firewalld. 

This example contains both pre-defined services and a list of ports to allow.  The dhcp-client, ntp, and ssh services are allowed.  The ports and protocols for these services are defined in their XML file.  The TCP ports 4505 and 4506 are also specifically allowed.  Using service names is preferred if a service definition file is available.

{{{
[root@head ncs205]# firewall-cmd --list-all
public (active)
  target: default
  icmp-block-inversion: no
  interfaces: eth0
  sources:
  services: dhcpv6-client ntp ssh
  ports: 4505-4506/tcp
  protocols:
  masquerade: no
  forward-ports:
  source-ports:
  icmp-blocks:
  rich rules:
}}}


!!! Basic commands

Here are a few basic commands involved in managing a firewall using the ~FirewallD management command, {{Command{firewall-cmd}}}

* Display the current state of the firewall: {{Command{ firewall-cmd &#45;-state }}}
* Display all options available: {{Command{ firewall-cmd -h }}}
* Display the active zones: {{Command{ firewall-cmd &#45;-get-active-zones }}}
** The default zone is //public//.  We'll stick with the default for our ~VMs
* Add a port to the permanent zone: {{Command{ firewall-cmd &#45;-add-port=5667/tcp &#45;-permanent }}}
* Remove a port from the permanent zone: {{Command{ firewall-cmd &#45;-remove-port=5667/tcp &#45;-permanent }}}
* Reload the firewall configuration and activate any new rules added to the permanent zone: {{Command{ firewall-cmd &#45;-reload }}}
* Add a new service to the runtime zone: {{Command{ firewall-cmd &#45;-add-service=ssh  }}}
** When possible, try to add services by name instead of port numbers.
* Remove a service from the runtime zone: {{Command{ firewall-cmd &#45;-remove-service=ssh  }}}
* Copy the runtime configuration to permanent: {{Command{ firewall-cmd &#45;-runtime-to-permanent }}}
* Get a list of all services known to firewalld:  {{Command{ firewall-cmd &#45;-get-services }}}
* List the current runtime firewall configuration: {{Command{ firewall-cmd &#45;-list-all }}}
* List the current permanent firewall configuration: {{Command{ firewall-cmd &#45;-permanent &#45;-list-all }}}
* Forward port 80 from the external interface to port 80 on 10.0.0.10 through the internal interface: {{Command{ firewall-cmd &#45;-zone=external &#45;-add-forward-port=port=80:proto=tcp:toaddr=10.0.0.10:toport=80 }}}
** Only here for reference.  We're not doing any port forwarding in this class.  This command is handy if you're working with a Linux-based router.

{{Note{''Note:'' When possible, allow traffic through the firewall with the {{Monospaced{-&#45;add-service}}} option instead of {{Monospaced{-&#45;add-port}}}.  It will result in a cleaner configuration for services which utilize multiple ports, such as DNS.  For example, if you only run {{Monospaced{-&#45;add-port=53/udp}}} and neglect the TCP protocol, you'll allow DNS queries to your server but will block zone transfers to the slave, which utilize TCP. }}}

!Sources

http://bodhizazen.net/Tutorials/iptables
http://www.thegeekstuff.com/2011/06/iptables-rules-examples/
http://www.liniac.upenn.edu/sysadmin/security/iptables.html
http://www.borgcube.com/blogs/2014/05/securing-ntp-and-rate-limiting-using-iptables/
http://fideloper.com/iptables-tutorial	
https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/7/html/security_guide/sec-using_firewalls

! Assignment

<<tiddler [[Lab H - Host-based Firewalls]]>>
Like most wikis, TiddlyWiki supports a range of simplified character formatting:
| !To get | !Type this |h
| ''Bold'' | {{{''Bold''}}} |
| ==Strikethrough== | {{{==Strikethrough==}}} |
| __Underline__ | {{{__Underline__}}} (that's two underline characters) |
| //Italic// | {{{//Italic//}}} |
| Superscript: 2^^3^^=8 | {{{2^^3^^=8}}} |
| Subscript: a~~ij~~ = -a~~ji~~ | {{{a~~ij~~ = -a~~ji~~}}} |
| @@highlight@@ | {{{@@highlight@@}}} |
| Tiddler Comments | {{{/%}}} text {{{%/}}}. |
| [[Make me a tiddler]] | {{{[[Make me a tiddler]]}}} |
| ~NoTiddler | {{{~NoTiddler}}} |
| {{{This is monotype}}} | {{{{{{This is monotype}}}}}} |

*sample:
|!th1111111111|!th2222222222|
|>| colspan |
| rowspan |left|
|~| right|
|bgcolor(#a0ffa0):colored| center |
|caption|c
For advanced effects, you can control the CSS style of a table by adding a row like this:
{{{
|cssClass|k
}}}


<<<
The highlight can also accept CSS syntax to directly style the text:
@@color:green;green coloured@@
@@background-color:#ff0000;color:#ffffff;red coloured@@
@@text-shadow:black 3px 3px 8px;font-size:18pt;display:block;margin:1em 1em 1em 1em;border:1px solid black;Access any CSS style@@
<<<
!!@@display:block;text-align:center;centered text@@

//For backwards compatibility, the following highlight syntax is also accepted://
{{{
@@bgcolor(#ff0000):color(#ffffff):red coloured@@
}}}
@@bgcolor(#ff0000):color(#ffffff):red coloured@@

/*{{{*/

@@color(yourcolorhere):colored text@@
@@color(fuchsia):colored text@@
@@bgcolor(yourcolorhere):your text here@@

[img[title|filename]]
[img[filename]]
[img[title|filename][link]]
[img[filename][link]]
[[text|url]]
[[Existing Tiddler Name|UglyTiddlerName]]

<<macro>>
<hr> = ----

*Entry One
**Sub-entry A
***Sub-sub-entry i
***Sub-sub-entry ii
**Sub-entry B
*Entry Two
*Entry Three
Use number signs (#'s) instead of asterisks for <OL type=1>

Tables:
|!Headings: add an exclamation point (!) right after the vertical bar.|!Heading2|!Heading3|
|Row 1, Column 1|Row 1, Column 2|Row 1, Column 3|
|>|>|Have one row span multiple columns by using a >|
|Have one column span multiple rows by using a ~|>| Use a space to right-align text in a cell|
|~|>| Enclose text in a cell with spaces to center it |
|>|>|bgcolor(green):Add color to a cell using bgcolor(yourcolorhere):|
|Add a caption by ending the table with a vertical bar followed by a c|c

!Header 1
!!Header 2
!!!Header 3
!!!!Header 4
!!!!!Header 5


Here's the code for a blockquote:
<<<
Here's the quoted text.
<<<

/*}}}*/

!Links
[[Calendar generator|http://zrenard.com/tiddlywiki/cal.php]]



Entities in HTML documents allow characters to be entered that can't easily be typed on an ordinary keyboard. They take the form of an ampersand (&), an identifying string, and a terminating semi-colon (;). There's a complete reference [[here|http://www.htmlhelp.com/reference/html40/entities/]]; some of the more common and useful ones are shown below. Also see [[Paul's Notepad|http://thepettersons.org/PaulsNotepad.html#GreekHtmlEntities%20HtmlEntitiesList%20LatinHtmlEntities%20MathHtmlEntities]] for a more complete list.

|>|>|>|>|>|>| !HTML Entities |
| &amp;nbsp; | &nbsp; | no-break space | &nbsp;&nbsp; | &amp;apos; | &apos; | single quote, apostrophe |
| &amp;ndash; | &ndash; | en dash |~| &amp;quot; | &quot; | quotation mark |
| &amp;mdash; | &mdash; | em dash |~| &amp;prime; | &prime; | prime; minutes; feet |
| &amp;hellip; | &hellip; |	horizontal ellipsis |~| &amp;Prime; | &Prime; | double prime; seconds; inches |
| &amp;copy; | &copy; | Copyright symbol |~| &amp;lsquo; | &lsquo; | left single quote |
| &amp;reg; | &reg; | Registered symbol |~| &amp;rsquo; | &rsquo; | right  single quote |
| &amp;trade; | &trade; | Trademark symbol |~| &amp;ldquo; | &ldquo; | left double quote |
| &amp;dagger; | &dagger; | dagger |~| &amp;rdquo; | &rdquo; | right double quote |
| &amp;Dagger; | &Dagger; | double dagger |~| &amp;laquo; | &laquo; | left angle quote |
| &amp;para; | &para; | paragraph sign |~| &amp;raquo; | &raquo; | right angle quote |
| &amp;sect; | &sect; | section sign |~| &amp;times; | &times; | multiplication symbol |
| &amp;uarr; | &uarr; | up arrow |~| &amp;darr; | &darr; | down arrow |
| &amp;larr; | &larr; | left arrow |~| &amp;rarr; | &rarr; | right arrow |
| &amp;lArr; | &lArr; | double left arrow |~| &amp;rArr; | &rArr; | double right arrow |
| &amp;harr; | &harr; | left right arrow |~| &amp;hArr; | &hArr; | double left right arrow |

The table below shows how accented characters can be built up by substituting a base character into the various accent entities in place of the underscore ('_'):

|>|>|>|>|>|>|>|>|>|>|>|>|>|>|>|>|>| !Accented Characters |
| grave accent | &amp;_grave; | &Agrave; | &agrave; | &Egrave; | &egrave; | &Igrave; | &igrave; | &Ograve; | &ograve; | &Ugrave; | &ugrave; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; |
| acute accent | &amp;_acute; | &Aacute; | &aacute; | &Eacute; | &eacute; | &Iacute; | &iacute; | &Oacute; | &oacute; | &Uacute; | &uacute; | &nbsp; | &nbsp; | &Yacute; | &yacute; | &nbsp; | &nbsp; |
| circumflex accent | &amp;_circ; | &Acirc; | &acirc; | &Ecirc; | &ecirc; | &Icirc; | &icirc; | &Ocirc; | &ocirc; | &Ucirc; | &ucirc; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; |
| umlaut mark | &amp;_uml; | &Auml; | &auml; |  &Euml; | &euml; | &Iuml; | &iuml; | &Ouml; | &ouml; | &Uuml; | &uuml; | &nbsp; | &nbsp; | &Yuml; | &yuml; | &nbsp; | &nbsp; |
| tilde | &amp;_tilde; | &Atilde; | &atilde; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &Otilde; | &otilde; | &nbsp; | &nbsp; | &Ntilde; | &ntilde; | &nbsp; | &nbsp; | &nbsp; | &nbsp; |
| ring | &amp;_ring; | &Aring; | &aring; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; |
| slash | &amp;_slash; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &Oslash; | &oslash; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; |
| cedilla | &amp;_cedil; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &Ccedil; | &ccedil; |

<HTML><a href="http://checkettsweb.com/#%5B%5BCSS-Colors%20and%20Backgrounds%5D%5D%20%5B%5BCSS-Text%20and%20Fonts%5D%5D%20OldStyleSheet%20Rin%20%5B%5BTiddlyWiki%20Structure%5D%5D">CSS Info</a></html>

<<version>>
[[Plugins]]
[[Styles]]
[[TagglyTagging]]
[[systemConfig]]
[[systemTiddler]]
[[excludeSearch]]
[[excludeLists]]
! Material

This page will discuss two topics:

1. Authenticating to Unix systems with SSH keys
2. Terminal multiplexing with GNU {{Command{screen}}}

These are both optional, but good to know and will make working with our lab systems much easier.


!! 1. Authenticating to Unix systems with SSH keys

Two mechanisms exist for SSH authentication:  
# normal passwords 
# key pairs used in asymmetric encryption
A key pair contains a private key that you keep secure and a public key that is distributed to the systems you have permission to connect to.  The private key you have is used to establish your identity. The presence of your public key on a remote system is used to establish your authorization to access it.  Private keys should be secured with a passphrase to ensure they cannot be maliciously used if they are captured by an attacker.  SSH authentication with passphrase-protected key pairs is much safer than passwords, since now an attacker must also capture the private key file in order to impersonate you.  For this reason, it is common to minimally block password authentication to a server when logging in as root or ideally only allow key authentication for all users.  More sensitive systems should require key-based authentication as part of general system hardening.  

Forcing key-based authentication gives us multi-factor authentication (MFA) when the key is properly secured with a passphrase:
# Something you have (the private key)
# Something you know (the key's passphrase)


We begin by creating a SSH keypair on the class shell server.  

{{Command{cd ~/.ssh/}}}
<<<
Change to the ~/.ssh/ directory, the default location for a user's ssh configuration files.
<<<

{{Command{ssh-keygen -t ed25519 -f  ncs205 }}}
<<<
Create a SSH key pair using default settings, except for changing the key type to ed25519 and naming the key ncs205.  The algorithm and key size can also be adjusted via flags.  The remaining defaults are reasonable.  You will be prompted to set a passphrase.  Choose something secure which you can remember.  This [[xkcd cartoon|https://xkcd.com/936/]] may be helpful.  The more entropy the better.
<<<

{{Command{ssh-copy-id -i ncs205 root@192.168.12.''//x//''}}}
<<<
Copy your public key to each of your ~VMs.  It will be saved to the file {{File{~/.ssh/authorized_keys}}} on the remote system (your VM).  The administrator may have to add the key for you on systems you're not able to log into yet.
<<<

{{Command{ssh -l root 192.168.12.''//x//''}}}
<<<
Try to connect to your test VM.  You should be prompted for a password since our private key is not in the default location and was not specified on the command line.
<<<

{{Command{ssh -i ncs205 -l root 192.168.12.''//x//''}}}
<<<
You should now be prompted for your SSH passphrase instead of password.  If an available and authorized SSH key is found it will be offered for use instead of your password.  Authentication will fall back to regular password if key-based fails.
<<<

{{Command{exit}}}
<<<
Disconnect from your VM
<<<

Having to specify the username and key file to use for each login to your ~VMs can be eliminated by using an ssh client configuration.  Edit {{File{~/.ssh/config}}} on the shell server and set a default username and ssh key for class ~VMs

Edit the file {{File{~/.ssh/config}}} and add the following:
{{{
Host test
	HostName 192.168.12.x
        
Host www
	HostName 192.168.12.x

Host *
	IdentityFile ~/.ssh/ncs205
	User root
}}}
Be sure to change the x above to your actual IP address.  This addition will also eliminate the need for specifying full IP addresses for each connection.  You'll be able to then connect with just {{Command{ssh //hostname//}}} and the IP address, user, and key file will be added for you.  Add new ~VMs to the config as they are issued to you.


!!! SSH agent - Unlock your key once for multiple connections

The SSH agent is a keyring which your SSH private keys can be attached to.  Once set up, future connections will look to that key ring when an authentication request is made instead of prompting you for your SSH passphrase each time.  The idea is one authentication event for many remote connections.

{{Command{ssh-agent > ~/.ssh/env}}}
<<<
Create a SSH agent, saving the environment information to the specified file.  This environment must be imported in order to make use of the agent.
<<<

{{Command{eval `cat ~/.ssh/env`}}}
<<<
Import the environment settings into the current shell environment
<<<

{{Command{ssh-add ~/.ssh/ncs205}}}
<<<
Add your ncs205 private key to your ssh agent keyring.  You should be prompted for its passphrase.
<<<

Once the SSH agent is established you may communicate to your lab systems without being prompted to authenticate each time.  Notice the lack of passphrase prompts:

{{Commands{
[merantn@shell ~]$ ''ssh test''
Last login: Mon Oct 19 15:18:26 2020 from 192.168.12.10

[root@test ~]# ''exit''
logout
Connection to 192.168.12.24 closed.

[merantn@shell ~]$ ''ssh www''
Last login: Mon Oct 19 15:19:51 2020 from 192.168.12.10

[root@www ~]# ''exit''
logout
Connection to 192.168.12.25 closed.
}}}


!! 2. Terminal multiplexing with GNU screen

GNU {{Command{screen}}} is a very useful tool for those working with the command line on many systems from different locations on a daily basis.  From within {{Command{screen}}}, connections can be made to many systems.  The user can detach from the screen session, change physical locations, and reconnect to their screen session continuing work where they left off.  GNU {{Command{screen}}} and ssh agents make a great combination for connecting to multiple machines over the course of your work day.

This video might help get you started:  https://www.youtube.com/watch?v=Mw6QvsChxo4

{{Command{cp ~merantn/.screenrc ~/}}}
<<<
Copy this default screen configuration file to your home directory.  This will establish some baseline settings.
<<<

If you first run the steps in Section 1 to set up ssh-agent and then launch {{Command{screen}}} to start your screen instance, your SSH Agent will be established for all screen windows.  You thus will not need to authenticate to your ~VMs as you move between them.  You will only need to run the {{Command{screen}}} command without any options once.  It will stay active with your tasks running in the background until you either terminate it or the class shell server restarts.

Screen commands:
| !Key Sequence | !Action |
| ~CTRL-a , 0 |Switch to window 0|
| ~CTRL-a , 1 |Switch to window 1|
| ~CTRL-a , 2 |Switch to window 2|
| ~CTRL-a , //n// |Switch to window //n//|
| ~CTRL-a , c |Create a new screen window|
| ~CTRL-a , " |Display available screen windows|
| ~CTRL-a , ' |Switch to a screen window by number|
| ~CTRL-a , A |Title the current screen window|
| ~CTRL-a , ? |Display screen help|

With screen now running, enter these screen commands to get things set up:
* Create a new window:  {{Command{~CTRL-a, ~CTRL-c}}}
* Switch to window 1: {{Command{~CTRL-a, 1}}}
** Connect to your test VM with ssh
* Create a new window:  {{Command{~CTRL-a, ~CTRL-c}}}
* Switch to window 2: {{Command{~CTRL-a, 2}}}
** Connect to your www VM with ssh
* Switch to window 0:  {{Command{~CTRL-a, 0}}}
** Use this window to work on the class shell server
* Detach from screen (as if you're done working for the day):   {{Command{~CTRL-a, d}}}
* Reconnect to your screen session (as though you're coming back later to continue work):  {{Command{screen -dr}}}

Now, when you disconnect from the shell server, all of your tasks will stay running in the background.  Log in again and run   {{Command{screen -dr}}} to continue where you left off.  Create new windows inside of screen as you need them for new ~VMs or to run additional tasks concurrently. 

More screen commands:
| !Key Sequence | !Action |
| ~CTRL-a , &#124; |Split window vertically|
| ~CTRL-a , S |Split window horizontally|
| ~CTRL-a , TAB |Switch between split windows|
| ~CTRL-a , X |Close a split window|
|>|>|
| ~CTRL-a , d |Detach from screen|
| ~CTRL-a , :password |Set a password for your screen session|


{{Note{[[This video|SSH]] may be a helpful demonstration}}}


!! 3. Defeating firewalls with SSH to access protected resources

See the [[Tunnels & Proxies with SSH]] page.


/%

!! 1. Authenticating to Unix systems with SSH keys

Two different sets of keys are used with SSH:  one for securing communication between the client and server and, optionally, a set to authenticate remote users.  

!!! SSH Host keys

* Public key crypto is used for encrypting communication between client and server
* Server keys are stored in the files {{File{/etc/ssh/ssh_host_*}}}
* Fingerprints for new systems are shown and stored in the user's {{File{~/.ssh/known_hosts}}} file.  This keeps a record of trusted systems.
** This file can leak identities of systems you are communicating with
** Hash your current known hosts file if you'd like to mask the systems: {{Command{ ssh-keygen -H }}}
* Fingerprints for known systems are compared on each login to identify MITM attacks
** The user is alerted if a mismatch is found
*** This is the warning you see if you connect to a new system for the first time or there's a server change when connecting to an existing system.
** The user should take steps to verify the host key has legitimately changed.  If this change is due to a MITM attack, the attacker could capture your credentials
** Display the fingerprint of a SSH public key: {{Command{ssh-keygen -lf  //file//.pub}}}

!!!! Demo:

{{Monospaced{
[merantn@shell ~]$ ''ssh head.ncs205.net''
The authenticity of host 'head.ncs205.net (192.168.12.15)' can't be established.
ECDSA key fingerprint is ~SHA256:bHKouQIItQNr5r1Im3tI0uk2ArpfYU1Yvop0SQhOLVY.
ECDSA key fingerprint is ~MD5:9f:0d:9c:2d:f6:2c:ef:9e:6a:bb:ab:e5:4b:c5:55:e4.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'head.ncs205.net' (ECDSA) to the list of known hosts.

# You can't log into this system, so press ~CTRL-C to abort:
merantn@head.ncs205.net's password:

# Here's the fingerprint of this system:
[merantn@shell ~]$ ''grep head ~/.ssh/known_hosts''
head.ncs205.net ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBBhZIx/NElfvUL0nI/KwOotqk5Fypf01LQpn8YIe7FfXI8xnwEzESmqZTOiC791SrvOaoIxIFu9WW9xO7+BcgSw=

# Hash the hosts in the file:
[merantn@shell ~]$ ''ssh-keygen -H''
/home/merantn/.ssh/known_hosts updated.
Original contents retained as /home/merantn/.ssh/known_hosts.old
WARNING: /home/merantn/.ssh/known_hosts.old contains unhashed entries
Delete this file to ensure privacy of hostnames

# Now a grep returns no results:
[merantn@shell ~]$ ''grep head ~/.ssh/known_hosts''
}}}

%/

! Additional Details

This video is a deep dive into SSH and has a lot of great info:  https://www.youtube.com/watch?v=fnkG9_jy2qc
Type the text for 'excludeLists'
Type the text for 'excludeSearch'
!! [[hack5 break-in]]

A VM named ''hack5'' has just been added for each of you.  It's presently powered on and assigned to the ''7th'' IP address of your range via DHCP.

This VM has information which can be found and vulnerabilities which can be exploited to gain access.  Required knowledge is based on our class material.  Information disclosure and some light brute forcing will yield the first flag.  There's flags to capture as your intrusion progresses to show you have increasingly gained access and to show how far you were able to get.  

Access hack5 for a little CTF (capture the flag).  These CTF challenges are a fun way to demonstrate skill, creativity, and understanding of the material.
* The "flag" to capture for flags 1, 2, 4, and 5 will look something like this:  {{Monospaced{ ''flag{//sometext//}'' }}}
* Flag 3 is the last 4 digits of Harper's U number and new lab average grade

What's the highest number flag you can access?  Capturing all five shows you have root-level access and have fully taken over the system.


Most system intrusions take advantage of misconfiguration or sloppy processes to gain a foothold and higher level of access.  Developing an understanding of how things work and following proper security practices is necessary for a defender to properly configure (and thus secure) their systems.  Understanding how things work also makes it easier for an attacker to exploit any misconfigurations.  

The first two flags will be found through basic system discovery & information disclosure.  The final flag and full system compromise will be obtained by exploiting sloppy work once you're in.


!! Scenario:

Embedded credentials are a common problem and lead to a large number of data breaches.  Web applications which pull data from other sources, like databases, must have a mechanism to authenticate to those sources.  The wiki we previously set up is an example of this.  If you were to examine your {{File{~LocalSettings.php}}} file, you would see your database credentials embedded in the file at about line 60.

Protecting those files, or having another secure mechanism to provide credentials, is critical.  Unfortunately, this is often not done properly.

The Brightspace gradebook is not designed well and is not user friendly.  For the purposes of this scenario, I've switched to my own gradebook for NCS 205 and am posting grades there instead.  You can view a sample at http://hack5.merantn.ncs205.net/ (requires SSH proxy).  ''Note:''  My copy does not have all data.

This data is fictitious.  But to provide anonymity, all grades are posted by the last four digits of the student's U number instead of their name.  

It's now the end of the semester and all lab grades have been posted for these fictitious students.  Harper is not doing well and needs to bring up her lab grade to ensure a good GPA for the semester.


!! Final Objectives:

1. Access the gradebook database and change Harper's grade to at least a B+.
2. Obtain root access to the system to more easily return later

There will be flags to capture along the way as you progress through this challenge.  Flags will be in the form of {{Monospaced{ flag{//string//} }}} where //string// will vary depending on the flag.  


!! Steps:

!!! 1. Create DNS record

!!!! Task:
Create a DNS A record so the hostname {{Monospaced{hack5.//username//.ncs205.net}}} points to the ''7th'' IP address in your range.  Ensure the DNS record will propagate and be globally accessible.

!!!! Discussion:
A copy of the VM containing the class gradebook was created for each of you to work with.  Networking was configured to use the ''7th'' IP address of your range.  

Web server //Name ~VirtualHosts// are a way for multiple web sites to share the same server at the same IP address.  The web server on your hack5 VM will be configured to provide a different website depending on the hostname that was used to access it.  ~VirtualHosts are configured here, so you will not receive the correct website if you only enter the IP address in your browser instead of the valid hostname.  You //must// access the website by its hostname.


!!! 2. Access the hack5 VM

!!!! Task:

You don't know the credentials for this VM, but a little intel is posted on the website mentioned in Step 1.  Harness that intel and what we learned about the antiquated password policy from Lab 58 to break in.

!!!! Discussion:

Lab 58 involved a password spraying attack where a small list of common passwords was tried against a list of known users.

The {{Command{ncrack}}} command may be helpful here.  Use one of your ~VMs as the attacker.

The following command string may be helpful:

> {{Command{ncrack -g cd=15,CL=10,at=1 -U users.txt -P passwords.txt //~IPAddress//:22}}}

* {{File{users.txt}}} will contain a list of usernames, one username per line.  You will need to create this list of usernames to try.
* {{File{passwords.txt}}} will contain a list of passwords to try, one password per line.
* //~IPAddress// will be the IP address to attack

The Wikipedia [[list of most common passwords|https://en.wikipedia.org/wiki/List_of_the_most_common_passwords]] is a good password reference.  A copy of the 10 most common passwords in each dataset was saved to {{File{ /opt/pub/ncs205/data/hack5/top10.txt }}} on the class shell server.

The command {{Command{ awk '{ for(col=2; col <= NF; col++) if(length($col) == 6) print $col }' top10.txt }}} will extract only the 6-character passwords and return a neat list of one password per line.  Tack on additional filters to remove the duplicates.  This output will become the password list for your ncrack command.

You should discover a way into the hack5 VM with a proper {{Command{ncrack}}} command.  Your first flag will be in that user's home directory and named {{File{flag1.txt}}}.


!!!3. Access the website files and find database credentials

!!!!Task:
Once you're in, your next objective is to locate the website, the grades web application, and ultimately the database credentials.  The file {{File{grades.php}}} will contain your next flag and the database credentials.

!!!!Discussion:
The filesystem structure under {{File{ /opt/ }}} may be a good starting point.


!!!4. Access the database & change the grades

!!!!Task:
Use the database credentials from the previous step to access the database and change Harper's grades.

!!!!Discussion:
We worked a bit with databases while setting up the wiki.  The following command can be used to log in:
> {{Command{mariadb -u //username// -p //database_name//}}}
The //username// and //database_name// will be found in the grades script.  You will be prompted for the //password//, which is also in the grades script.

Once you're connected to the database and at the mariadb prompt, a ''select'' statement can be used to query the database and view data, for example:
> {{Command{ select * from grades; }}}
Be sure your database commands end with a semi-colon.

Changes to the database data can be made with an update command:
> {{Command{ update grades set //column//='//value//' where //criteria//; }}}
For example, 
> {{Command{ update grades set lab1='10' where //criteria//; }}}
The //criteria// will be some criteria to identify which rows you want to update, such as {{Monospaced{ user = 'bob' }}}.  In this example, //user// is the column name and //bob// is the value.  You will need to adjust the //criteria// to match the data you find.  Be sure to quote your strings.

Reloading the grades website will immediately show the new values once the database data is altered.

The last 4 digits of Harper's U number and new lab average grade will be flag 3


!!!5. Obtain root access to the VM and discover flags 4 & 5

!!!!Task:
There are other missteps made by the VM administrators which could be exploited for obtaining full root access to the system.  Exploit them to obtain a root shell account and obtain the flag in {{File{/root/flag5.txt}}}.  You should discover the file {{File{flag4.txt}}} along the way.

!!!!Discussion:
This one will involve a little creativity and resourcefulness.  For a hint, the files that will help you get there are also under {{File{/opt/}}}.


!! Material
This challenge will draw from material discussed in [[Week 15, Part 2]] (Access control and user management), [[Week 13, Part 1]] (Web services), Lab 58, and Section 1 of &nbsp;[[Working more efficiently with GNU screen & SSH keys]] (Authenticating to Unix systems with SSH keys)
* And will require a bit of creativity
* Linux Administration Chapter 23 also contains a lot of good info on SSH
* We haven't discussed the {{Command{ncrack}}} tool.  The ''__n__''etwork ''__crack__'' tool is useful for running brute force authentication attacks against a system.  Brute force //usually// isn't effective against a targeted host, but with proper intel, password spraying may find success.
* We also haven't discussed database commands and that material is out of scope for our class.  All necessary database commands have been provided.  If you run into trouble with the mariadb {{Command{select}}} and {{Command{update}}} commands, please reach out for help.


!! Grading
This Final Exam will be worth 20 points.  The point breakdown for the flags and responses is:

* Flags 1 & 2: 5 points each
* Flag 3: 2 points
* Flag 4: 2 points
* Flag 5: 2 points
* Page 3 question: 4 points

/%
Standard rules for lab assignments apply.  The deliverable PDF must contain your name at the top of the first page and must be properly submitted; it must be uploaded to the correct directory on the class shell server and given the correct filename.  These skills were part of the material this semester and are thus in scope for the Final exam content.  No points will be awarded if these requirements are not completed correctly.  The grading queue is visible and can be used as verification that the deliverable PDF was properly collected for evaluation.

!!!! Deliverable:
Complete and submit the [[Final Exam|exam/ncs205-final.pdf]] with your steps and flag contents.  Upload the PDF to the class shell server to the directory {{File{/opt/pub/ncs205/submit/final/}}}.  The file name must be {{File{ncs205-final-//username//.pdf}}}.

@@ ''The write-up will be due by EOD on Friday, December 15'' @@
%/
!!!! [[hack6 break-in]]

A VM named ''hack6'' has just been added for each of you.  It's presently powered on and assigned to the last IP address of your range via DHCP.

This VM has information which can be found and vulnerabilities which can be exploited to gain access.  All required knowledge is based on our class material.  Information disclosure will yield the first three flags and a vulnerability can be exploited to ultimately obtain root-level access.  There's flags to capture as your intrusion progresses to show you have increasingly gained access and to show how far you were able to get.  

Access hack6 for a little CTF (capture the flag).  These CTF challenges are a fun way to demonstrate skill, creativity, and understanding of the material.
* This VM is currently running on the last IP address of your range
** Create an A record so host name hack6 points to this IP address
* The ultimate objective is to break into the system and gain full root privileges.
* Flags to capture are in the following files:
** {{File{flag1.txt}}}
** {{File{flag2.txt}}}
** {{File{/home&#47;//user//&#47;flag3.txt}}}
** {{File{/root/flag4.txt}}}
** //user// is a placeholder for a valid user on the system.  Flag 3 is in one of the system user's home directories.
** The "flag" to capture is the string of text located inside each of those four files.  It will look something like this:  {{Monospaced{ ''flag{//sometext//}'' }}}

What's the highest number flag you can access?  Capturing all four shows you have root-level access and have fully taken over the system.

The first two flags can be obtained by interacting with services running on the VM.  The second two flags can be obtained after gaining shell access to the VM.
* Don't overthink it; the flags (especially the first three) only require basic interaction with the system
* Everyone seems to first gravitate towards brute-force tactics. This is the path of the unskilled. These are actually rarely successful when targeting a specific system or account and will not help you here.

This challenge will primarily draw from material covered [[Week 15, Part 2]] (Access control and user management), [[Week 15, Part 1]] (Scheduled Tasks), and Section 1 of &nbsp;[[Working more efficiently with GNU screen & SSH keys]] (Authenticating to Unix systems with SSH keys)
* And will require a bit of creativity
* Linux Administration Chapter 23 also contains a lot of good info on SSH
* The {{Command{nmap}}} command can be used to scan a system to identify listening services.  The basic syntax is {{Command{nmap //ip_address//}}}.
** You will need to interact with those services to obtain shell access.  You will not see this VM in the Proxmox UI and do not have a user account's password to log in directly.


Most system intrusions take advantage of misconfiguration to gain a higher level of access.  Developing an understanding of how things work is necessary for a defender to properly configure (and thus secure) their systems.  Understanding how things work also makes it easier for an attacker to exploit any misconfigurations.  

The first two flags will be found through basic system discovery & information disclosure.  Flag 2 will be accompanied by your key into the VM. The final flag and full system compromise will be obtained by exploiting a system misconfiguration.

The path to full system compromise is linear.  All flags will need to be obtained in order.  There is only one route to obtain the first three flags.  There are two different ways to obtain the fourth.  Can you find both misconfigurations which will grant root access?

!!!! Grading
The Final Exam will be worth 20 points.  The point breakdown for the flags and responses is:

* Flags 1 & 2: 5 points each
* Flag 3: 2 points
* Flag 4: 3 points
* Page 3 question: 5 points

Standard rules for lab assignments apply.  The deliverable PDF must contain your name at the top of the first page and must be properly submitted; it must be uploaded to the correct directory on the class shell server and given the correct filename.  These skills were part of the material this semester and are thus in scope for the Final exam content.  No points will be awarded if these requirements are not completed correctly.  The grading queue is visible and can be used as verification that the deliverable PDF was properly collected for evaluation.

!!!! Deliverable:
Complete and submit [[Final Exam|exam/ncs205-final.pdf]] with your steps and flag contents.  Upload the PDF to the class shell server to the directory {{File{/opt/pub/ncs205/submit/final/}}}.  The file name must be {{File{ncs205-final-//username//.pdf}}}.

@@ ''The final exam write-up will be due by EOD Thursday, May 2'' @@

Consider pacing this out over a longer period of time.  With tasks like this, it's sometimes helpful to step away for a bit if you get stuck.  That'll give you some time to think about it in the background or come back later with a fresher set of eyes and perspective.
!! less 

| !Command | !Action |
| Page Up or ''b'' | Scroll back one page |
| Page Down or space | Scroll forward one page |
| Up arrow | Scroll up one line |
| Down arrow | Scroll down one line |
| ''G'' | Move to the end of the text file |
| ''1G'' or ''g'' | Move to the beginning of the text file |
| /characters | Search forward to the next occurrence of //characters// |
| ''n'' | Search for the next occurrence of the previous search |
| ''h'' | Display help screen |
| ''q'' | Quit less |
config.macros.listTags = { text: "Hello" };
// Render an unordered list of links to every tiddler carrying a given tag.
// params[0] is the tag to match; params[1] is the field to sort by
// (eg, title, modified, modifier or text).
config.macros.listTags.handler = function(place,macroName,params)
{
	var matches = store.getTaggedTiddlers(params[0],params[1]);
	var list = createTiddlyElement(place,"ul",null,null,"");
	for(var i = 0; i < matches.length; i++)
	{
		var item = createTiddlyElement(list,"li",null,null,"");
		createTiddlyLink(item,matches[i].title,true);
	}
}
/***
|''Name:''|Plugin setDefaults|
|''Version:''|1.0.1 (2006-03-16)|
|''Source:''|http://tiddlywikitips.com/#%5B%5BPlugin%20setDefaults%5D%5D|
|''Author:''|Jim Barr (jim [at] barr [dot] net)|
|''Licence:''|[[BSD open source license]]|
|''TiddlyWiki:''|2.0|
|''Browser:''|Firefox 1.0.4+; Firefox 1.5; InternetExplorer 6.0|
!Description

These settings simply set "default" values for several system features and Plugins.
***/

/***
Standard settings:
***/
//{{{
// Boolean feature toggles (the value noted after each line is TiddlyWiki's
// built-in default).
config.options.chkRegExpSearch        = false;  // default false
config.options.chkCaseSensitiveSearch = false;  // default false
config.options.chkAnimate             = false;  // default true
config.options.chkSaveBackups         = false;  // default true
config.options.chkAutoSave            = false;  // default false
config.options.chkGenerateAnRssFeed   = false;  // default false
config.options.chkSaveEmptyTemplate   = false;  // default false
config.options.chkOpenInNewWindow     = true;   // default true
config.options.chkToggleLinks         = false;  // default false
config.options.chkHttpReadOnly        = true;   // default true
config.options.chkForceMinorUpdate    = false;  // default false
config.options.chkConfirmDelete       = true;   // default true
config.options.chkInsertTabs          = true;   // tab key inserts a tab when editing a tiddler

// Text-valued options.
//config.options.txtUserName          = "Nick";          // default "YourName"
config.options.txtBackupFolder        = "";              // default ""
config.options.txtMainTab             = "tabTimeline";   // default "tabTimeline"
config.options.txtMoreTab             = "moreTabAll";    // default "moreTabAll"
config.options.txtMaxEditRows         = "30";            // default "30"

//}}}

/***
Custom Plugin settings:
***/
//{{{
// Settings consumed by third-party plugins installed in this wiki (the value
// noted after each line is the plugin's default).
config.options.chkSinglePageMode       = false;  // default "true"
config.options.chkSearchTitlesFirst    = true;
config.options.chkSearchList           = true;   // default "false"
config.options.chkStepWiseNavigationOn = true;   // default "false"
config.options.chkDisableAutoSelect    = true;
config.options.chkTextAreaExtensions   = true;

// Shorten the close-tiddler button label (default "close").
config.messages.messageClose.text      = "X";
// Uncomment to blank out the missing-tiddler placeholder text:
// config.views.wikified.defaultText      = "";            // default "The tiddler '%0' doesn't yet exist. Double-click to create it"
//}}}
Type the text for 'systemConfig'
Type the text for 'systemTiddler'