toBala KM King


Requires JavaScript.
Linux 雲端運算基礎 - 學習筆記本 - V1.2 (2012/07/05) 由大福知識聯盟設計與維護
<!--{{{-->
<!-- MarkupPreHead: injected into the page <head>; advertises the wiki's RSS feed. -->
<link rel='alternate' type='application/rss+xml' title='RSS' href='index.xml'/>
<!--}}}-->
Background: #fff
Foreground: #000
PrimaryPale: #8cf
PrimaryLight: #18f
PrimaryMid: #04b
PrimaryDark: #014
SecondaryPale: #ffc
SecondaryLight: #fe8
SecondaryMid: #db4
SecondaryDark: #841
TertiaryPale: #eee
TertiaryLight: #ccc
TertiaryMid: #999
TertiaryDark: #666
Error: #f88
/*{{{*/
/* TiddlyWiki "StyleSheetColors" shadow stylesheet.
   Every [[ColorPalette::Name]] token is substituted at display time with the
   matching entry from the ColorPalette tiddler above, so the whole colour
   scheme can be re-themed by editing that palette alone. */

/* page base colours */
body {background:[[ColorPalette::Background]]; color:[[ColorPalette::Foreground]];}

/* links */
a {color:[[ColorPalette::PrimaryMid]];}
a:hover {background-color:[[ColorPalette::PrimaryMid]]; color:[[ColorPalette::Background]];}
a img {border:0;}

/* headings */
h1,h2,h3,h4,h5,h6 {color:[[ColorPalette::SecondaryDark]]; background:transparent;}
h1 {border-bottom:2px solid [[ColorPalette::TertiaryLight]];}
h2,h3 {border-bottom:1px solid [[ColorPalette::TertiaryLight]];}

/* generic buttons */
.button {color:[[ColorPalette::PrimaryDark]]; border:1px solid [[ColorPalette::Background]];}
.button:hover {color:[[ColorPalette::PrimaryDark]]; background:[[ColorPalette::SecondaryLight]]; border-color:[[ColorPalette::SecondaryMid]];}
.button:active {color:[[ColorPalette::Background]]; background:[[ColorPalette::SecondaryMid]]; border:1px solid [[ColorPalette::SecondaryDark]];}

/* page header (site title bar) */
.header {background:[[ColorPalette::PrimaryMid]];}
.headerShadow {color:[[ColorPalette::Foreground]];}
.headerShadow a {font-weight:normal; color:[[ColorPalette::Foreground]];}
.headerForeground {color:[[ColorPalette::Background]];}
.headerForeground a {font-weight:normal; color:[[ColorPalette::PrimaryPale]];}

/* tab bar */
.tabSelected{color:[[ColorPalette::PrimaryDark]];
	background:[[ColorPalette::TertiaryPale]];
	border-left:1px solid [[ColorPalette::TertiaryLight]];
	border-top:1px solid [[ColorPalette::TertiaryLight]];
	border-right:1px solid [[ColorPalette::TertiaryLight]];
}
.tabUnselected {color:[[ColorPalette::Background]]; background:[[ColorPalette::TertiaryMid]];}
.tabContents {color:[[ColorPalette::PrimaryDark]]; background:[[ColorPalette::TertiaryPale]]; border:1px solid [[ColorPalette::TertiaryLight]];}
.tabContents .button {border:0;}

/* sidebar and its slider panels */
#sidebar {}
#sidebarOptions input {border:1px solid [[ColorPalette::PrimaryMid]];}
#sidebarOptions .sliderPanel {background:[[ColorPalette::PrimaryPale]];}
#sidebarOptions .sliderPanel a {border:none;color:[[ColorPalette::PrimaryMid]];}
#sidebarOptions .sliderPanel a:hover {color:[[ColorPalette::Background]]; background:[[ColorPalette::PrimaryMid]];}
#sidebarOptions .sliderPanel a:active {color:[[ColorPalette::PrimaryMid]]; background:[[ColorPalette::Background]];}

/* wizard dialogs (import/upgrade etc.) */
.wizard {background:[[ColorPalette::PrimaryPale]]; border:1px solid [[ColorPalette::PrimaryMid]];}
.wizard h1 {color:[[ColorPalette::PrimaryDark]]; border:none;}
.wizard h2 {color:[[ColorPalette::Foreground]]; border:none;}
.wizardStep {background:[[ColorPalette::Background]]; color:[[ColorPalette::Foreground]];
	border:1px solid [[ColorPalette::PrimaryMid]];}
.wizardStep.wizardStepDone {background:[[ColorPalette::TertiaryLight]];}
.wizardFooter {background:[[ColorPalette::PrimaryPale]];}
.wizardFooter .status {background:[[ColorPalette::PrimaryDark]]; color:[[ColorPalette::Background]];}
.wizard .button {color:[[ColorPalette::Foreground]]; background:[[ColorPalette::SecondaryLight]]; border: 1px solid;
	border-color:[[ColorPalette::SecondaryPale]] [[ColorPalette::SecondaryDark]] [[ColorPalette::SecondaryDark]] [[ColorPalette::SecondaryPale]];}
.wizard .button:hover {color:[[ColorPalette::Foreground]]; background:[[ColorPalette::Background]];}
.wizard .button:active {color:[[ColorPalette::Background]]; background:[[ColorPalette::Foreground]]; border: 1px solid;
	border-color:[[ColorPalette::PrimaryDark]] [[ColorPalette::PrimaryPale]] [[ColorPalette::PrimaryPale]] [[ColorPalette::PrimaryDark]];}

/* transient notification area (top right) */
#messageArea {border:1px solid [[ColorPalette::SecondaryMid]]; background:[[ColorPalette::SecondaryLight]]; color:[[ColorPalette::Foreground]];}
#messageArea .button {color:[[ColorPalette::PrimaryMid]]; background:[[ColorPalette::SecondaryPale]]; border:none;}

/* popup menus and popup tiddler previews */
.popupTiddler {background:[[ColorPalette::TertiaryPale]]; border:2px solid [[ColorPalette::TertiaryMid]];}

.popup {background:[[ColorPalette::TertiaryPale]]; color:[[ColorPalette::TertiaryDark]]; border-left:1px solid [[ColorPalette::TertiaryMid]]; border-top:1px solid [[ColorPalette::TertiaryMid]]; border-right:2px solid [[ColorPalette::TertiaryDark]]; border-bottom:2px solid [[ColorPalette::TertiaryDark]];}
.popup hr {color:[[ColorPalette::PrimaryDark]]; background:[[ColorPalette::PrimaryDark]]; border-bottom:1px;}
.popup li.disabled {color:[[ColorPalette::TertiaryMid]];}
.popup li a, .popup li a:visited {color:[[ColorPalette::Foreground]]; border: none;}
.popup li a:hover {background:[[ColorPalette::SecondaryLight]]; color:[[ColorPalette::Foreground]]; border: none;}
.popup li a:active {background:[[ColorPalette::SecondaryPale]]; color:[[ColorPalette::Foreground]]; border: none;}
.popupHighlight {background:[[ColorPalette::Background]]; color:[[ColorPalette::Foreground]];}
.listBreak div {border-bottom:1px solid [[ColorPalette::TertiaryDark]];}

/* tiddler chrome: title, toolbar, tags, footer */
.tiddler .defaultCommand {font-weight:bold;}

.shadow .title {color:[[ColorPalette::TertiaryDark]];}

.title {color:[[ColorPalette::SecondaryDark]];}
.subtitle {color:[[ColorPalette::TertiaryDark]];}

.toolbar {color:[[ColorPalette::PrimaryMid]];}
.toolbar a {color:[[ColorPalette::TertiaryLight]];}
.selected .toolbar a {color:[[ColorPalette::TertiaryMid]];}
.selected .toolbar a:hover {color:[[ColorPalette::Foreground]];}

.tagging, .tagged {border:1px solid [[ColorPalette::TertiaryPale]]; background-color:[[ColorPalette::TertiaryPale]];}
.selected .tagging, .selected .tagged {background-color:[[ColorPalette::TertiaryLight]]; border:1px solid [[ColorPalette::TertiaryMid]];}
.tagging .listTitle, .tagged .listTitle {color:[[ColorPalette::PrimaryDark]];}
.tagging .button, .tagged .button {border:none;}

.footer {color:[[ColorPalette::TertiaryLight]];}
.selected .footer {color:[[ColorPalette::TertiaryMid]];}

.sparkline {background:[[ColorPalette::PrimaryPale]]; border:0;}
.sparktick {background:[[ColorPalette::PrimaryDark]];}

/* status highlights */
.error, .errorButton {color:[[ColorPalette::Foreground]]; background:[[ColorPalette::Error]];}
.warning {color:[[ColorPalette::Foreground]]; background:[[ColorPalette::SecondaryPale]];}
.lowlight {background:[[ColorPalette::TertiaryLight]];}

.zoomer {background:none; color:[[ColorPalette::TertiaryMid]]; border:3px solid [[ColorPalette::TertiaryMid]];}

.imageLink, #displayArea .imageLink {background:transparent;}

.annotation {background:[[ColorPalette::SecondaryLight]]; color:[[ColorPalette::Foreground]]; border:2px solid [[ColorPalette::SecondaryMid]];}

/* rendered tiddler content (viewer) */
.viewer .listTitle {list-style-type:none; margin-left:-2em;}
.viewer .button {border:1px solid [[ColorPalette::SecondaryMid]];}
.viewer blockquote {border-left:3px solid [[ColorPalette::TertiaryDark]];}

.viewer table, table.twtable {border:2px solid [[ColorPalette::TertiaryDark]];}
.viewer th, .viewer thead td, .twtable th, .twtable thead td {background:[[ColorPalette::SecondaryMid]]; border:1px solid [[ColorPalette::TertiaryDark]]; color:[[ColorPalette::Background]];}
.viewer td, .viewer tr, .twtable td, .twtable tr {border:1px solid [[ColorPalette::TertiaryDark]];}

.viewer pre {border:1px solid [[ColorPalette::SecondaryLight]]; background:[[ColorPalette::SecondaryPale]];}
.viewer code {color:[[ColorPalette::SecondaryDark]];}
.viewer hr {border:0; border-top:dashed 1px [[ColorPalette::TertiaryDark]]; color:[[ColorPalette::TertiaryDark]];}

.highlight, .marked {background:[[ColorPalette::SecondaryLight]];}

/* edit mode */
.editor input {border:1px solid [[ColorPalette::PrimaryMid]];}
.editor textarea {border:1px solid [[ColorPalette::PrimaryMid]]; width:100%;}
.editorFooter {color:[[ColorPalette::TertiaryMid]];}

/* backstage (authoring) area */
#backstageArea {background:[[ColorPalette::Foreground]]; color:[[ColorPalette::TertiaryMid]];}
#backstageArea a {background:[[ColorPalette::Foreground]]; color:[[ColorPalette::Background]]; border:none;}
#backstageArea a:hover {background:[[ColorPalette::SecondaryLight]]; color:[[ColorPalette::Foreground]]; }
#backstageArea a.backstageSelTab {background:[[ColorPalette::Background]]; color:[[ColorPalette::Foreground]];}
#backstageButton a {background:none; color:[[ColorPalette::Background]]; border:none;}
#backstageButton a:hover {background:[[ColorPalette::Foreground]]; color:[[ColorPalette::Background]]; border:none;}
#backstagePanel {background:[[ColorPalette::Background]]; border-color: [[ColorPalette::Background]] [[ColorPalette::TertiaryDark]] [[ColorPalette::TertiaryDark]] [[ColorPalette::TertiaryDark]];}
.backstagePanelFooter .button {border:none; color:[[ColorPalette::Background]];}
.backstagePanelFooter .button:hover {color:[[ColorPalette::Foreground]];}
/* legacy IE opacity via filter; quoted form is as shipped by TiddlyWiki core */
#backstageCloak {background:[[ColorPalette::Foreground]]; opacity:0.6; filter:'alpha(opacity:60)';}
/*}}}*/
/*{{{*/
/* TiddlyWiki "StyleSheetLayout" shadow stylesheet: typography, spacing and
   positioning.  Selectors prefixed with "* html" and properties prefixed
   with "_" are deliberate hacks parsed only by old Internet Explorer. */

/* IE6 hasLayout fix */
* html .tiddler {height:1%;}

body {font-size:.75em; font-family:arial,helvetica; margin:0; padding:0;}

/* heading scale */
h1,h2,h3,h4,h5,h6 {font-weight:bold; text-decoration:none;}
h1,h2,h3 {padding-bottom:1px; margin-top:1.2em;margin-bottom:0.3em;}
h4,h5,h6 {margin-top:1em;}
h1 {font-size:1.35em;}
h2 {font-size:1.25em;}
h3 {font-size:1.1em;}
h4 {font-size:1em;}
h5 {font-size:.9em;}

hr {height:1px;}

a {text-decoration:none;}

dt {font-weight:bold;}

/* nested ordered lists cycle decimal -> alpha -> roman every three levels */
ol {list-style-type:decimal;}
ol ol {list-style-type:lower-alpha;}
ol ol ol {list-style-type:lower-roman;}
ol ol ol ol {list-style-type:decimal;}
ol ol ol ol ol {list-style-type:lower-alpha;}
ol ol ol ol ol ol {list-style-type:lower-roman;}
ol ol ol ol ol ol ol {list-style-type:decimal;}

.txtOptionInput {width:11em;}

#contentWrapper .chkOptionInput {border:0;}

.externalLink {text-decoration:underline;}

.indent {margin-left:3em;}
.outdent {margin-left:3em; text-indent:-3em;}
code.escaped {white-space:nowrap;}

/* wiki-link styling: bold = exists, italic = missing */
.tiddlyLinkExisting {font-weight:bold;}
.tiddlyLinkNonExisting {font-style:italic;}

/* the 'a' is required for IE, otherwise it renders the whole tiddler in bold */
a.tiddlyLinkNonExisting.shadow {font-weight:bold;}

#mainMenu .tiddlyLinkExisting,
	#mainMenu .tiddlyLinkNonExisting,
	#sidebarTabs .tiddlyLinkNonExisting {font-weight:normal; font-style:normal;}
#sidebarTabs .tiddlyLinkExisting {font-weight:bold; font-style:normal;}

/* header: shadow copy offset by 1px behind the foreground copy */
.header {position:relative;}
.header a:hover {background:transparent;}
.headerShadow {position:relative; padding:4.5em 0em 1em 1em; left:-1px; top:-1px;}
.headerForeground {position:absolute; padding:4.5em 0em 1em 1em; left:0px; top:0px;}

.siteTitle {font-size:3em;}
.siteSubtitle {font-size:1.2em;}

/* left menu column */
#mainMenu {position:absolute; left:0; width:10em; text-align:right; line-height:1.6em; padding:1.5em 0.5em 0.5em 0.5em; font-size:1.1em;}

/* right sidebar column */
#sidebar {position:absolute; right:3px; width:16em; font-size:.9em;}
#sidebarOptions {padding-top:0.3em;}
#sidebarOptions a {margin:0em 0.2em; padding:0.2em 0.3em; display:block;}
#sidebarOptions input {margin:0.4em 0.5em;}
#sidebarOptions .sliderPanel {margin-left:1em; padding:0.5em; font-size:.85em;}
#sidebarOptions .sliderPanel a {font-weight:bold; display:inline; padding:0;}
#sidebarOptions .sliderPanel input {margin:0 0 .3em 0;}
#sidebarTabs .tabContents {width:15em; overflow:hidden;}

/* wizard dialogs */
.wizard {padding:0.1em 1em 0em 2em;}
.wizard h1 {font-size:2em; font-weight:bold; background:none; padding:0em 0em 0em 0em; margin:0.4em 0em 0.2em 0em;}
.wizard h2 {font-size:1.2em; font-weight:bold; background:none; padding:0em 0em 0em 0em; margin:0.4em 0em 0.2em 0em;}
.wizardStep {padding:1em 1em 1em 1em;}
.wizard .button {margin:0.5em 0em 0em 0em; font-size:1.2em;}
.wizardFooter {padding:0.8em 0.4em 0.8em 0em;}
.wizardFooter .status {padding:0em 0.4em 0em 0.4em; margin-left:1em;}
.wizard .button {padding:0.1em 0.2em 0.1em 0.2em;}

/* notification area; _position:absolute is the IE6 fixed-position fallback */
#messageArea {position:fixed; top:2em; right:0em; margin:0.5em; padding:0.5em; z-index:2000; _position:absolute;}
.messageToolbar {display:block; text-align:right; padding:0.2em 0.2em 0.2em 0.2em;}
#messageArea a {text-decoration:underline;}

/* popups */
.tiddlerPopupButton {padding:0.2em 0.2em 0.2em 0.2em;}
.popupTiddler {position: absolute; z-index:300; padding:1em 1em 1em 1em; margin:0;}

.popup {position:absolute; z-index:300; font-size:.9em; padding:0; list-style:none; margin:0;}
.popup .popupMessage {padding:0.4em;}
.popup hr {display:block; height:1px; width:auto; padding:0; margin:0.2em 0em;}
.popup li.disabled {padding:0.4em;}
.popup li a {display:block; padding:0.4em; font-weight:normal; cursor:pointer;}
.listBreak {font-size:1px; line-height:1px;}
.listBreak div {margin:2px 0;}

/* tabs */
.tabset {padding:1em 0em 0em 0.5em;}
.tab {margin:0em 0em 0em 0.25em; padding:2px;}
.tabContents {padding:0.5em;}
.tabContents ul, .tabContents ol {margin:0; padding:0;}
.txtMainTab .tabContents li {list-style:none;}
.tabContents li.listLink { margin-left:.75em;}

#contentWrapper {display:block;}
#splashScreen {display:none;}

/* central column: margins clear the absolutely-positioned menu and sidebar */
#displayArea {margin:1em 17em 0em 14em;}

.toolbar {text-align:right; font-size:.9em;}

/* tiddler chrome */
.tiddler {padding:1em 1em 0em 1em;}

.missing .viewer,.missing .title {font-style:italic;}

.title {font-size:1.6em; font-weight:bold;}

.missing .subtitle {display:none;}
.subtitle {font-size:1.1em;}

.tiddler .button {padding:0.2em 0.4em;}

.tagging {margin:0.5em 0.5em 0.5em 0; float:left; display:none;}
.isTag .tagging {display:block;}
.tagged {margin:0.5em; float:right;}
.tagging, .tagged {font-size:0.9em; padding:0.25em;}
.tagging ul, .tagged ul {list-style:none; margin:0.25em; padding:0;}
.tagClear {clear:both;}

.footer {font-size:.9em;}
.footer li {display:inline;}

.annotation {padding:0.5em; margin:0.5em;}

/* viewer (rendered wikitext); "* html" line is an IE-only pre width fix */
* html .viewer pre {width:99%; padding:0 0 1em 0;}
.viewer {line-height:1.4em; padding-top:0.5em;}
.viewer .button {margin:0em 0.25em; padding:0em 0.25em;}
.viewer blockquote {line-height:1.5em; padding-left:0.8em;margin-left:2.5em;}
.viewer ul, .viewer ol {margin-left:0.5em; padding-left:1.5em;}

.viewer table, table.twtable {border-collapse:collapse; margin:0.8em 1.0em;}
.viewer th, .viewer td, .viewer tr,.viewer caption,.twtable th, .twtable td, .twtable tr,.twtable caption {padding:3px;}
table.listView {font-size:0.85em; margin:0.8em 1.0em;}
table.listView th, table.listView td, table.listView tr {padding:0px 3px 0px 3px;}

.viewer pre {padding:0.5em; margin-left:0.5em; font-size:1.2em; line-height:1.4em; overflow:auto;}
.viewer code {font-size:1.2em; line-height:1.4em;}

/* edit mode */
.editor {font-size:1.1em;}
.editor input, .editor textarea {display:block; width:100%; font:inherit;}
.editorFooter {padding:0.25em 0em; font-size:.9em;}
.editorFooter .button {padding-top:0px; padding-bottom:0px;}

.fieldsetFix {border:0; padding:0; margin:1px 0px 1px 0px;}

.sparkline {line-height:1em;}
.sparktick {outline:0;}

.zoomer {font-size:1.1em; position:absolute; overflow:hidden;}
.zoomer div {padding:1em;}

/* backstage (authoring) area; "* html" width lines are IE-only fixes */
* html #backstage {width:99%;}
* html #backstageArea {width:99%;}
#backstageArea {display:none; position:relative; overflow: hidden; z-index:150; padding:0.3em 0.5em 0.3em 0.5em;}
#backstageToolbar {position:relative;}
#backstageArea a {font-weight:bold; margin-left:0.5em; padding:0.3em 0.5em 0.3em 0.5em;}
#backstageButton {display:none; position:absolute; z-index:175; top:0em; right:0em;}
#backstageButton a {padding:0.1em 0.4em 0.1em 0.4em; margin:0.1em 0.1em 0.1em 0.1em;}
#backstage {position:relative; width:100%; z-index:50;}
#backstagePanel {display:none; z-index:100; position:absolute; margin:0em 3em 0em 3em; padding:1em 1em 1em 1em;}
.backstagePanelFooter {padding-top:0.2em; float:right;}
.backstagePanelFooter a {padding:0.2em 0.4em 0.2em 0.4em;}
#backstageCloak {display:none; z-index:20; position:absolute; width:100%; height:100px;}

.whenBackstage {display:none;}
.backstageVisible .whenBackstage {display:block;}
/*}}}*/
/***
StyleSheet for use when a translation requires any css style changes.
This StyleSheet can be used directly by languages such as Chinese, Japanese and Korean which need larger font sizes.
***/
/*{{{*/
/* StyleSheetLocale: CJK-friendly font-size overrides layered on top of the
   layout stylesheet above (later rules win at equal specificity). */
body {font-size:0.8em;}
#sidebarOptions {font-size:1.05em;}
#sidebarOptions a {font-style:normal;}
#sidebarOptions .sliderPanel {font-size:0.95em;}
.subtitle {font-size:0.8em;}
.viewer table.listView {font-size:0.95em;}
/*}}}*/
/*{{{*/
/* StyleSheetPrint: when printing, hide all navigation chrome so only the
   tiddler content in #displayArea is output.  Note "! important" with a
   space is valid CSS and is kept as shipped by TiddlyWiki core. */
@media print {
#mainMenu, #sidebar, #messageArea, .toolbar, #backstageButton, #backstageArea {display: none ! important;}
#displayArea {margin: 1em 1em 0em 1em;}
/* Fixes a feature in Firefox 1.5.0.2 where print preview displays the noscript content */
noscript {display:none;}
}
/*}}}*/
<!--{{{-->
<!-- PageTemplate shadow tiddler: overall page skeleton.  The macro= and
     refresh='content' attributes are TiddlyWiki hooks that transclude the
     named tiddler (SiteTitle, MainMenu, SideBarOptions, ...) into each element. -->
<div class='header' macro='gradient vert [[ColorPalette::PrimaryLight]] [[ColorPalette::PrimaryMid]]'>
<!-- the site title is rendered twice (shadow + foreground) for an embossed effect -->
<div class='headerShadow'>
<span class='siteTitle' refresh='content' tiddler='SiteTitle'></span>&nbsp;
<span class='siteSubtitle' refresh='content' tiddler='SiteSubtitle'></span>
</div>
<div class='headerForeground'>
<span class='siteTitle' refresh='content' tiddler='SiteTitle'></span>&nbsp;
<span class='siteSubtitle' refresh='content' tiddler='SiteSubtitle'></span>
</div>
</div>
<div id='mainMenu' refresh='content' tiddler='MainMenu'></div>
<div id='sidebar'>
<div id='sidebarOptions' refresh='content' tiddler='SideBarOptions'></div>
<div id='sidebarTabs' refresh='content' force='true' tiddler='SideBarTabs'></div>
</div>
<!-- open tiddlers are rendered into #tiddlerDisplay inside the central column -->
<div id='displayArea'>
<div id='messageArea'></div>
<div id='tiddlerDisplay'></div>
</div>
<!--}}}-->
<!--{{{-->
<!-- ViewTemplate shadow tiddler: layout for a tiddler in read mode —
     toolbar, title, modifier/date subtitle, tag lists, then the wikified text. -->
<div class='toolbar' macro='toolbar closeTiddler closeOthers +editTiddler > fields syncing permalink references jump'></div>
<div class='title' macro='view title'></div>
<div class='subtitle'><span macro='view modifier link'></span>, <span macro='view modified date'></span> (<span macro='message views.wikified.createdPrompt'></span> <span macro='view created date'></span>)</div>
<div class='tagging' macro='tagging'></div>
<div class='tagged' macro='tags'></div>
<div class='viewer' macro='view text wikified'></div>
<!-- clears the floated .tagging/.tagged blocks -->
<div class='tagClear'></div>
<!--}}}-->
<!--{{{-->
<!-- EditTemplate shadow tiddler: layout for a tiddler in edit mode —
     save/cancel/delete toolbar, then editors for title, text and tags. -->
<div class='toolbar' macro='toolbar +saveTiddler -cancelTiddler deleteTiddler'></div>
<div class='title' macro='view title'></div>
<div class='editor' macro='edit title'></div>
<div macro='annotations'></div>
<div class='editor' macro='edit text'></div>
<div class='editor' macro='edit tags'></div><div class='editorFooter'><span macro='message views.editor.tagPrompt'></span><span macro='tagChooser'></span></div>
<!--}}}-->
To get started with this blank TiddlyWiki, you'll need to modify the following tiddlers:
* SiteTitle & SiteSubtitle: The title and subtitle of the site, as shown above (after saving, they will also appear in the browser title bar)
* MainMenu: The menu (usually on the left)
* DefaultTiddlers: Contains the names of the tiddlers that you want to appear when the TiddlyWiki is opened
You'll also need to enter your username for signing your edits: <<option txtUserName>>
These InterfaceOptions for customising TiddlyWiki are saved in your browser

Your username for signing your edits. Write it as a WikiWord (eg JoeBloggs)

<<option txtUserName>>
<<option chkSaveBackups>> SaveBackups
<<option chkAutoSave>> AutoSave
<<option chkRegExpSearch>> RegExpSearch
<<option chkCaseSensitiveSearch>> CaseSensitiveSearch
<<option chkAnimate>> EnableAnimations

----
Also see AdvancedOptions
<<importTiddlers>>
Revision 3497
''參考文章''
1. 認識邊緣網路架構 VEB、VN-link、VEPA技術介紹 (一定要看)
http://www.netadmin.com.tw/article_content.aspx?sn=1112070005
2. A nice overview of MacVTap (without boring details about not doing what you would expect):
http://virt.kernelnewbies.org/MacVTap
3. Virtualized bridged networking with MacVTap (好文章)
http://seravo.fi/2012/10/virtualized-bridged-networking-with-macvtap
4. Here is a description of why you most likely don't want to use MacVTap yet:
http://libvirt.org/formatnetwork.html#examplesDirect (short version: your KVM host cannot see its guest machines on the network)
5. Guest and host cannot see each other using linux-kvm and macvtap
http://superuser.com/questions/349253/guest-and-host-cannot-see-each-other-using-linux-kvm-and-macvtap

{{item1{Using a macvtap "direct" connection}}}
Since 0.9.4, QEMU and KVM only, requires Linux kernel 2.6.34 or newer This shows how to use macvtap to connect to the physical network directly through one of a group of physical devices (without using a host bridge device). As with the host bridge network, the guests will effectively be directly connected to the physical network so their IP addresses will all be on the subnet of the physical network, and there will be no restrictions on inbound or outbound connections. Note that, due to a limitation in the implementation of macvtap, these connections do not allow communication directly between the host and the guests - if you require this you will either need the attached physical switch to be operating in a mirroring mode (so that all traffic coming to the switch is reflected back to the host's interface), or provide alternate means for this communication (e.g. a second interface on each guest that is connected to an isolated network). The other forward modes that use macvtap (private, vepa, and passthrough) would be used in a similar fashion.
{{{
      <network>
        <name>direct-macvtap</name>
        <forward mode="bridge">
          <interface dev="eth20"/>
          <interface dev="eth21"/>
          <interface dev="eth22"/>
          <interface dev="eth23"/>
          <interface dev="eth24"/>
        </forward>
      </network>
}}}


The solution is to configure a macvlan interface on the hypervisor, with the same IP address than the real hardware interface (very important), and to configure routing on the host to use it. In Qemu/KVM, use a macvtap interface on the hardware interface as usual.

For my config (192.168.1.0/24 network, p10p1 hardware interface, and 192.168.1.1 gateway), it gives (on the hypervisor):
{{{
ip link add link p10p1 address 00:19:d1:29:d2:58 macvlan0 type macvlan mode bridge
ip address add 192.168.1.100/24 dev macvlan0
ip link set dev macvlan0 up

ip route flush dev p10p1
ip route add default via 192.168.1.1 dev macvlan0 proto static
}}}

<<toBalaNotes "1">>

///%1
//%/
&nbsp;
__{{item1{學習筆記本簡介}}}__
這是一個由單網頁 (HTML, CSS, JavaScript) 所建構的資訊工作平台 (Platform), 在這平台中你可以輕鬆的處理各式各樣資訊, 例如 網頁(XHTML), 可縮放向量圖形 (SVG), 數學標籤語言 (MathML), 同步多媒體集成語言 (SMIL), PDF 等, 並以 [標籤分類] 方式, 有效的將平台中的資訊, 轉換成個人知識庫.

學習筆記本 有提供 Wiki 語法, 可使你快速產生網頁資訊, 更可以作為 Ajax 及 Java 程設人員的開發平台 (首創), 至於多媒體的展示能力更不在話下, 因這平台本體就是網頁, 當然可輕易展示圖片, 影片, 音樂 (JPG, GIF, AVI, WAV,..). 相信由這平台的實作, 你可以感覺單網頁平台所創造的資訊力 (force) 是無限的, 在下一個版本 (2.0) 已計劃整合 ''SQLite'', 這可使得 ''學習筆記本'' 大大提升其執行效率及文章儲存容量

__''@@font-size:14px;系統需求@@''__
|硬體|CPU : P4+, 記憶體 : 512 MB+|
|瀏覽器|Firefox 2.0+, IE 6.0+, Safari 3.1+|
|作業系統|Windows 2000/XP/Vista, Mac OS X|

__''@@font-size:14px;核心技術@@''__
學習筆記本核心技術是來自於 [[TiddlyWiki|http://www.tiddlywiki.com/]], 並與 TiddlyWiki 採用相同授權方式 (BSD OpenSourceLicense)

__{{item1{下載學習筆記本}}}__
點選 [[這裡|http://tbala.net/download/toBalaKMKNotepad.zip]] 下載大約 9 MB 的壓縮檔 (toBalaKMKNotepad.zip)

__{{item1{安裝與啟動學習筆記本}}}__
你只需將下載的壓縮檔 (toBalaKMKNotepad.zip) 解壓縮至資料磁碟機 (建議 USB 隨身碟), 這時你會看到ㄧ個名為 toBalaKMKNotepad 的資料夾,
在資料夾中快點二下 toBalaKMKNotepad.html, 系統會使用預設瀏覽器將之開啟 (目前只支援 IE 5.5+, Firefox 1.5+, Safari 3.1+)

''[注意]'' 切勿將壓縮檔 toBalaKMKNotepad.zip 解壓縮至 ''[桌面]'' 或 ''[名稱中有空白字元的目錄]'', 這樣操作會造成 ''學習筆記本'' 無法正常工作. 在 toBalaKMKNotepad 資料夾中, 存在的某些 .exe 檔 (tbsys.exe, tbjava.exe, tbjavac.exe), 會被防毒軟體誤判為病毒並隔離, 請自行設定防毒軟體, 將這些檔案視為安全檔案.

__{{item1{學習筆記本的目錄結構}}}__
{{{
toBalaKMKNotepad
       |--- img                   儲存圖檔 (png, jpg,...)
       |
       |--- movie                儲存影音檔 (avi, mov,...)
       |
       |--- music               儲存音樂檔 (mp3, mp4, wav...)
       |
       |--- documents         儲存各式文件檔 (pdf, text, doc,...)
       |
       |--- tools                 儲存隨身應用軟體 (zoomIt, Inkscape, hfs,...)
       |
       |--- ajax                  儲存 Ajax 程設人員的工具及程式 (HTML, CSS, JavaScript)
       |
       |--- java                   儲存 Java 程設人員所需的工具及程式 (J2SE, Tomcat,..)
       |
       |--- xml                   儲存各式 XML 標準資訊檔 (SVG, XHTML, MathML, ODF,...)
       |
       |--- db                     儲存 SQLite 所建立的資料庫
       |
       |--- jslib                   儲存 Ajax Framework
}}}

__{{item1{學習筆記本 - 版本記錄}}}__

''@@color:red;[V1.1 - 2008/09/30]@@''
1. 將 ImportTiddlersPlugin 更新為 4.3.3
2. toBalaJava 巨集升級為 toBalaJava2 巨集
3. toBalaAjax 巨集加入 [網頁文字編輯] 功能
4. toBalaFlashPlayer 巨集升級為 toBalaSWF2 巨集 (使用 SWFObject 2.1)
5. 新增 [更新土芭樂巨集程式碼] 按鈕
6. 新增 [匯出文章] 功能按鈕 (ExportTiddlersPlugin) 
7. 新增 toBalaBackup 巨集, 備份指定文章
8. 新增 toBalaCoreBackup 巨集, 備份 "土芭樂巨集" 程式
9. 修改 toBalaLIB 程式庫 (getMainTree, TagDigg, getTiddlerDIV)
10. 將 [版面管理員] 改版為 [控制台]

__{{item1{筆記本設定 (設定將存於瀏覽器 Cookies 中)}}}__

請簽名 <<option txtUserName>>
<<option chkGenerateAnRssFeed>> 儲存變更時,也儲存 RSS feed

__{{item1{版面設定 (設定將存於筆記本中)}}}__
<<toBalaManager>>

__{{item1{筆記本備份 (設定將存於瀏覽器 Cookies 中)}}}__
{{op1{1. 資訊樹}}}
備份檔名 : <<option txtBalaBackupFileName>>
備份主標籤 : <<option txtBalaTreeBody>>
其它備份文章 : <<option txtBalaBackupOthers>>

<<toBalaBackup "NotepadBackup.html">>

{{op1{2. 土芭樂巨集}}}
備份檔名 : <<option txtBalaCoreBackupFileName>>
其它備份文章 : <<option txtBalaCoreBackupOthers>>

<<toBalaCoreBackup "toBalaCoreMacro.html">>

__{{item1{巨集管理}}}__

<<loadTiddlers "label:檢查更新土芭樂巨集" http://tbala.net/x/toBalaMacro.html updates quiet>>















































///%

Linux 雲端運算基礎 - 學習筆記本

V1.1 (2011/01/30) 由大福知識聯盟設計與維護

<<tagsTree twcms "" 1 4 index label>>
<<tagsTree cloud01 "" 1 4 index label>>
<<tagsTree cloud02 "" 1 4 index label>>
<<tagsTree cloud03 "" 1 4 index label>>
<<tagsTree cloud04 "" 1 4 index label>>
<<tagsTree cloud05 "" 1 4 index label>>
<<tagsTree cloud06 "" 1 4 index label>>
<<tagsTree KMKConfig "" 1 4 index label>>
<<tagsTree menu "" 1 4 index label>>

 [[首頁]] | [[土芭樂 3.0 - 數位新思路|http://tbala.net/]]  | [[TiddlyWiki 練功坊|http://tiddlywiki.tbala.net/]] | [[匯入文章]]  | [[匯出文章]]  |  &nbsp;&nbsp;<<toBalaRun "Java-CMD.bat" "" "命令提示視窗">>&nbsp;&nbsp;<<newTiddler label:"新增文章">>&nbsp;&nbsp;<<closeAll>>&nbsp;&nbsp;<<saveChanges>> 
//%/
{{item1{1.Hub(集線器)}}}
在星狀拓樸網路(star topology)中,扮演連接或重新建立訊號的角色,可擴大類比或者是數位訊號。在區域網路(LAN)中,電腦與電腦利用網路連接時,如果用Hub 連接,即使有任何一段線路出問題,只會有一台電腦無法運作,不會影響網路中其他電腦的作業。在接收封包(Packet)進來之後,會將這個封包送到其它所有的電腦(即廣播,每一台電腦都會收到該封包),不管誰才是應該收到該封包的電腦。

{{item1{2.Bridge(橋接器)}}}
Bridge是一個用來連接不同的網路區段(segment)的設備,例如﹐可以用一個 bridge 來連接兩個 Ethernet segment、或連接一個 Token Ring segment 到一個 Ethernet segment。其主要功能在於決定是否讓資料訊框(Frame)通過Bridge 到另一端網路上,當Bridge 接收到訊框後,會將訊框目的地的MAC 位址與Bridge 的table 做比對,如果table 中能找到符合的位址,則進一步確認封包與目的地電腦是否為同一區段網路,若是的話,目的地電腦不需透過Bridge 就能收到資料(發揮filter功能);如果不是的話,Bridge 就會把訊框傳至目的地的電腦所在區段網路(發揮forward功能)。
@@color:red;P.S:雖然Bridge被用來分割網路,但是它們並不會隔離廣播或多點傳播的封包。@@

{{item1{3.Switch(交換器)}}}
Switch 的作用是在區域網路中,將網路作連接的動作。Switch 有一個table,記錄著每一台電腦的MAC 位址,當封包進來之後,Switch 會去檢查該封包的目的地的是哪一個MAC 位址的電腦,只將這個封包送給該台電腦,其他電腦則不會收到封包。
Switch的傳輸方式
(1). Cut Through:接收到目的地址後即轉發出去。延時小,但壞的資料一樣轉發。
(2). Store-and-Forward:接收到完整的資料包後,校驗好壞,好的轉發,壞的丟棄重發。傳輸可靠,但延時較長。
(3). Fragment free:接收到資料包後,大於64bytes的轉發,小於64bytes的丟棄。好壞介於上述兩種方式之間。

{{item1{4.Router(路由器)}}}
路由器是用來將網路的資訊, 使用在電腦之間傳送的基本設備, 路由器的工作在於 OSI 模式第三層(網路層),用來決定資料傳遞路徑的設備. 我們使用的IP協定就是藉由路由器將不同的IP位址連接在一起. 網路上的資料分成一段一段的封包packet, 而這些封包要指向何處便是由路由器來決定的, 路由器會根據資料的目的地, 指示正確的方向, 計算評估最便捷有效率的路徑來傳輸資料, 也就是說路由器要為封包做最佳化的工作, 找出最適當的路徑. 路由器通常最少會有兩個介面, 而這兩個介面分別區隔不同的IP網段. 例如IP分享器有WAN和LAN兩種介面, 區隔WAN的實際IP與LAN的虛擬IP網段.

Router 與 Bridge 的另一個不同點在於:
Bridge 只是單純做為一個決定是否讓封包通過的橋樑,Router 則會執行選徑功能(OSPF,EIGRP,...etc.)。
<<toBalaNotes "1">>

///%1
//%/
Apache 維基百科 : http://zh.wikipedia.org/zh-tw/Apache_HTTP_Server
Apache 軟體基金會 : http://www.apache.org/

Apache HTTP Server(簡稱 Apache)是  Apache 軟體基金會的一個開放源碼的網頁伺服器,可以在大多數電腦作業系統中運行,由於其跨平台和安全性被廣泛使用,是最流行的 Web 伺服器端軟體之一。它快速、可靠並且可通過簡單的 API 擴充,將 Perl/Python 等直譯器編譯到伺服器中。

''歷史''
Apache 起初由伊利諾大學香檳分校的國家超級電腦應用中心(NCSA)開發。此後,Apache Httpd 被開放原始碼團體的成員不斷的發展和加強。Apache Http 網站伺服器擁有牢靠可信的美譽,已經在全球超過半數的網站中被使用-特別是幾乎所有最熱門和瀏覽量最大的網站。比方說,維基百科網站伺服器就是使用 Apache 的。

剛開始發展時,Apache 只是 Netscape 網頁伺服器(現在是Sun ONE)之外的開放原始碼選擇之一。慢慢地,它開始在功能和速度超越其他基於 Unix 的 HTTP 伺服器。到了Apache 2.x 的時代,實際效率又比 Apache 1.x更快,2.x 比1.x 能同時服務更多的網頁連線數。

1996 年 4 月以來,Apache 一直是 Internet 上最流行的 HTTP 伺服器:1999年5月它在57%的網頁伺服器上運行,到了 2005 年 7 月這個比例上升到了 69%。在 2005 年11月最風光的時候達到接近 70% 的市佔率,不過在部份擁有大量域名的主機域名商轉換為微軟 IIS 平台後,Apache市佔率近年來呈現些微下滑。同時搜尋引擎巨擘Google自己的網頁伺服器平台 GWS 推出後(也可說是一種修改版的 Apache),再加上 nginx、Lighttpd 等輕量化網頁伺服器軟體在市場上有一些能見度,這些因素都反應在整體網頁伺服器市佔率的消長,Apache 的市佔率就隨之滑落。根據 Netcraft 在 2009 年12月的最新統計數據,Apache 的市佔率已經降為 53.67%,IIS 降為18.26%,谷歌網頁服務器 13.53%,nginx 8.75%。儘管如此,Apache 仍舊是當前網際網路市場上,市佔率最高的網頁伺服器軟體。

<<toBalaNotes "apache">>


///%apache
//%/
''參考文章''
1. KVM - The Linux Kernel-Based Virtual Machine (有 KVM 最新資訊)
http://www.linux-kvm.com/
2. Linux KVM virtualization gains steam in cloud computing market (必讀 : KVM 使用現況)
http://www.techworld.com.au/article/343852/linux_kvm_virtualization_gains_steam_cloud_computing_market
3. Finally user-friendly virtualization for Linux
http://www.linuxinsight.com/finally-user-friendly-virtualization-for-linux.html
4. Create a KVM-based virtual server
http://www.ibm.com/developerworks/linux/library/l-kvm-virtual-server/
5. Qemu + KVM is the future of open source virtualization
http://www.turnkeylinux.org/blog/qemu-kvm-rules
6. KVM vs. VMware: A Case Study
http://www.thevarguy.com/2009/04/27/kvm-vs-vmware-a-case-study/
7. Linux KVM as a Learning Tool (KVM 是學習 C 語言很好的環境)
http://www.linuxjournal.com/magazine/linux-kvm-learning-tool?page=0,0
8. Nested Virtualization with KVM Intel (虛中虛)
http://kashyapc.wordpress.com/2012/01/14/nested-virtualization-with-kvm-intel/

@@font-size:18pt;color:blue;
"In December 2006, ''Linus Torvalds'' announced that new versions of the Linux kernel would
include the virtualization tool known as ''KVM'' (Kernel Virtual Machine Monitor)."
@@
{{item1{KVM 簡介}}}
本文網址 : http://benjr.tw/?q=node/532

Kernel-based Virtual Machine (KVM) 是 Linux 核心的架構下的一部份,目前 KVM 支援 native virtualization 的架構. native virtualization (也有人稱為 hardware-assisted virtualization) 是由 CPU 來支援的虛擬化技術 Intel 叫做 VT(Virtualization Technology) 或者 AMD virtualization (AMD-V) .在 Linux 這兩種 CPU 透過兩個不同的 module 來支援 KVM (INTEL: kvm-intel.ko, AMD: kvm-amd.ko).在 RHEL5 update4 會自動依據 /proc/cpuinfo 的 flag 來選擇出適合的 CPU 模組 , 這 script 檔儲存在  /etc/sysconfig/modules/kvm.modules.

''[註 1]'' Ubuntu 系統的 kvm-intel.ko 存在 /lib/modules/2.6.32-23-generic-pae/kernel/arch/x86/kvm/ 這目錄
''[註 2]'' Intel 的 Virtualization Technology 可以讓處理器支援多個 OS, x86 虛擬技術請參考 : http://en.wikipedia.org/wiki/X86_virtualization

不過光靠 KVM 還無法將虛擬化的工作完成,還須配合 QEMU 做一些裝置模擬以及下列的 GNU 的軟體.
{{{
- KVM kernel module: GPL v2
- KVM user module: LGPL v2
- QEMU virtual CPU core library (libqemu.a) 和 QEMU PC system emulator: LGPL
- Linux user mode QEMU emulator: GPL
- BIOS files (bios.bin, vgabios.bin and vgabios-cirrus.bin): LGPL v2 or later
}}}
KVM 目前是由一家叫做 Qumranet 公司的 Avi Kivity 來做維護,不過目前已轉移給 Red Hat 來使用, 所以在未來 RHEL 的版本都會以 KVM 為主.
 
''KVM 細部架構圖''

[img[img/kvm/kvm_qemu01.png]]

{{item1{KVM 系統模組}}}

''下載網址 : http://sourceforge.net/projects/kvm/files/''

[img[img/kvm/downloadkvm.png]]

''kvm 目錄 :'' 存放舊的 kvm 核心模組, 檔名為 kvm-xx.tar.gz
''kvm-kmod 目錄 :'' 存放新的 kvm 核心模組 (2.6.30 以後), 檔名為 kvm-kmod-2.6.32.8.tar.bz2
''qemu-kvm 目錄 :'' 存放修改過的 qemu, 這個版本可使用 kvm 核心模組, 執行虛擬運算

''[重要]'' QEMU 1.3 這個版本開始, 可以直接使用 KVM 核心模組, 不再需要透過修過的 QEMU 版本 (qemu-kvm)
參考網站 : [[Qemu 1.3 Released: qemu-kvm merge into qemu complete|http://www.linux-kvm.com/content/qemu-13-released-qemu-kvm-merge-qemu-complete]]

{{item1{這是 維基百科 對於 KVM 的定義}}}
In computing, ''Kernel-based Virtual Machine (KVM) is a Linux kernel virtualization infrastructure''. As of 2010, KVM supports ''native virtualization'' using ''Intel VT-x or AMD-V''. ''Paravirtualization'' support is also available for ''Linux and Windows guests'' using the ''VirtIO framework''; this includes a paravirtual Ethernet card, a disk I/O controller, a balloon device for adjusting guest memory-usage, and VGA graphics interface using VMware drivers.

Architecture ports are currently being developed for s390, PowerPC, IA-64 and ARM.

The ''Linux kernel 2.6.20'' (February 2007) included the first version of KVM. KVM has also been ported to FreeBSD as a loadable kernel module.

A wide variety of guest operating-systems work with KVM, including many flavours of Linux, BSD, Solaris, Windows, Haiku, ReactOS and AROS Research Operating System and a patched version of KVM can run Mac OS X.

''Intel® Virtualization Technology List''
由這 http://ark.intel.com/VTList.aspx 網址可以知道那些 CPU  有提供 Intel VT 技術

''KVM 相關網站''
- KVM 官方網址 : http://www.linux-kvm.org/page/Main_Page
- IBM KVM 文件 (非常完整) : https://publib.boulder.ibm.com/infocenter/lnxinfo/v3r0m0/index.jsp?topic=/liaat/liaatkvmover.htm
- RedHat KVM 官方網址 : https://www.redhat.com/virtualization/rhev/server/
- Ubuntu KVM 官方網址 : https://help.ubuntu.com/community/KVM/

<<toBalaNotes "KVM">>
{{item1{KVM 主要開發者 - Avi Kivity}}}
[img[img/avi_small.jpg]]

''Avi Kivity's Overview''
{{{
Current

        KVM Maintainer at Red Hat

Past

        Developer at Qumranet
        Developer at Exanet
        Developer at Talmai

Education

        Technion-Machon Technologi Le' Israel
}}}

''Avi Kivity's Summary''
{{{
All-around software developer, with experience ranging from using a Java application server, through high performance storage servers, to x86 virtual machine monitors.

Fluent in C, C++, x86 assembler and architecture, Java, Python, Perl, SQL, bash.
Specialties

Linux kernel, high performance I/O systems, clustering, virtualization, x86 internals, networking.
}}}

{{item1{Avi Kivity's Experience}}}
''KVM Maintainer : Red Hat''
{{{
Public Company; 1001-5000 employees; RHT; Computer Software industry

September 2008 – Present (3 years 8 months)

Lead developer/maintainer of KVM open source Linux hypervisor.
}}}

''Developer : Qumranet''
{{{
Privately Held; 11-50 employees; Computer Software industry

May 2005 – August 2008 (3 years 4 months)

Software developer. Leading the development of kvm, the Linux Kernel Virtual Machine.

http://www.linux-kvm.org
}}}

''Developer : Exanet''
{{{
Privately Held; 51-200 employees; Computer Software industry

June 2002 – October 2004 (2 years 5 months)

Software developer. Design and implementation of a distributed storage system.
}}}

''Developer : Talmai''
{{{
June 1999 – May 2001 (2 years)

Architecture, design, and implementation of a distributed marketing system for credit card issuers.
Co-founder, Software Architect
Avionitek

1995 – 1999 (4 years)
}}}

''Developer : IBM Research''
{{{
Public Company; 10,001+ employees; IBM; Information Technology and Services industry

1991 – 1991 (less than a year)
}}}

網址 : http://il.linkedin.com/in/avikivity

{{item1{Difference between KVM and QEMU}}}

''Qemu''

QEmu is a complete and standalone software on it's own. You use it to emulate machines, it's very flexible and portable. Mainly it works by a special 'recompiler' that transforms binary code written for a given processor into another one (say, to run MIPS code on a PPC mac, or ARM in an x86 PC).

To emulate more than just the processor, Qemu includes a long list of peripheral emulators: disk, network, VGA, PCI, USB, serial/parallel ports, etc.

''KQemu: (虛擬與實體使用同一種 CPU 架構)''

In the specific case where both source and target are the same architecture (like the common case of x86 on x86), it still has to parse the code to remove any 'privileged instructions' and replace them with context switches. To make it as efficient as possible on x86 Linux, there's a kernel module called KQemu that handles this.

Being a kernel module, KQemu is able to execute most code unchanged, replacing only the lowest-level ring0-only instructions. In that case, userspace Qemu still allocates all the RAM for the emulated machine, and loads the code. the difference is that instead of recompiling the code, it calls KQemu to scan/patch/execute it. All the peripheral hardware emulation is done in Qemu.

This is a lot faster than plain Qemu because most code is unchanged, but still has to transform ring0 code (most of the code in the VM's kernel), so performance still suffers.

''KVM''

KVM is a couple of things: first it's a Linux kernel module (now included in mainline) that switches the processor into a new 'guest' state. The guest state has it's own set of ring states, but privileged ring0 instructions fall back to the hypervisor code. Since it's a new processor mode of execution, the code doesn't have to be modified in any way.

Apart from the processor state switching, the kernel module also handles a few low-level parts of the emulation, like the MMU registers (used to handle VM) and some parts of the PCI emulated hardware.

second, KVM is a fork of the Qemu executable. Both teams work actively to keep difference at a minimum, and there are advances in reducing it. Eventually, the goal is that Qemu should work anywhere, and if a KVM kernel module is available, it could be used. But for the foreseeable future, Qemu team focuses on hardware emulation and portability, while KVM folks focus on the kernel module (sometimes moving small parts of the emulation there, if its proven to improve performance), and interfacing with the rest of the userspace code.

The kvm-qemu executable works like normal Qemu: allocates RAM, loads the code, and instead of recompiling it, or calling KQemu, it spawns a thread (this is important); the thread calls the KVM kernel module to switch to guest mode and proceeds to execute the VM code. On a privilege instruction, it switches back to the KVM kernel module, which, if necessary, signals the Qemu thread to handle most of the hardware emulation.

One of the nice things of this architecture is that the guest code is emulated in a posix thread, which you can manage with normal Linux tools. If you want a VM with 2 or 4 cores, kvm-qemu creates 2 or 4 threads, each of them calls the KVM kernel module to start executing. The concurrency (if you have enough real cores) or scheduling (if you don't) is managed by the normal Linux scheduler, keeping code small and surprises limited.

<<toBalaNotes "2">>

///%KVM
//%/

///%2
//%/
1. NoSQL 是資料庫觀念的復興運動
http://blog.roodo.com/rocksaying/archives/15009431.html
''參考文章''
1. Webkit 瀏覽器有力威脅 IE 霸權 
http://martinoeiarchive.blogspot.com/2008/09/webkitie.html
2. 透過 OpenGL 作 WebKit 網頁描繪
http://blog.linux.org.tw/~jserv/archives/002009.html
3. JavaScript 與 Desktop - WebKit
http://blog.roodo.com/rocksaying/archives/14282187.html
4. MWC 2010:BlackBerry 新的WebKit 瀏覽器,很快啊!
http://chinese.engadget.com/2010/02/16/mwc-2010-blackberry-webkit-fast/

WebKit 官方網站 : http://webkit.org/ (找不到在 Ubuntu 安裝的手冊)

{{item1{WebKit 維基百科}}}
本文網址 : http://zh.wikipedia.org/zh-tw/WebKit

WebKit 是 Mac OS X v10.3 及以上版本所包含的軟體框架(對 v10.2.7 及以上版本也可通過軟體更新獲取)。同時,WebKit 也是Mac OS X 的 Safari 網頁瀏覽器的基礎。WebKit 是一個開源項目,主要由 KDE 的 KHTML 修改而來,並且包含了一些來自蘋果公司的組件。

傳統上,WebKit 包含一個網頁引擎 WebCore 和一個腳本引擎 JavaScriptCore,它們分別對應的是 KDE 的 KHTML 和 KJS。不過,隨著 JavaScript 引擎的獨立性越來越強,現在 WebKit 和 WebCore 已經基本上混用不分(例如 Google Chrome 採用 V8 引擎,卻仍然宣稱自己是 WebKit 核心)。

''Linux''
2008年 10 月 22 日投入市場的 Android,其內置瀏覽器 Google Chrome Lite 是第一款 Linux 平台的穩定版 WebKit 瀏覽器,也是迄 2009 年 3 月為止整個Linux體系中與系統配合最完善的 WebKit 瀏覽器之一。能夠與之相提並論的是Palm公司第二代作業系統webOS 的內置瀏覽器,也是建基於 Linux 的 WebKit 瀏覽器。而在桌面 Linux戰線,儘管Midori孤身奮戰多年,Google Chrome 聲稱要推出 Linux 版頗有時日,GNOME 的 Epiphany、KDE 的 Konqueror,乃至 Flock 都宣言要轉向 WebKit 核心,但迄 2009 年 3 月為止,還沒有穩定版的完整WebKit 瀏覽器見於桌面 Linux。

儘管 WebKit 的原型 K 核心是由 Qt 寫成,但 Linux 下目前最受矚目的 WebKit 項目卻是 Gnome 領導的 WebKit/Gtk+。隨著奇趣科技於 2008 年 6 月被 Nokia 收購,Qt 方面也加快了 WebKit 的「回歸」進程。

{{item1{Ubuntu 與 Webkit}}}
Since ''Karmic (9.10)'', WebKit has been available in the Ubuntu archives as package ''libwebkit-1.0-2''. It can be launched as a standalone demo (for testing bugs independently of the various browsers that use it) by running ''/usr/lib/webkit-1.0-2/libexec/GtkLauncher'' 

''1. 安裝 WebKit ''
在 Ubuntu 12.04 已安裝, 所以不需要執行以下命令
{{{
$ sudo apt-get install  libwebkitgtk-3.0-0
}}}

''2. 啟動 Webkit 瀏覽器''
{{{
/usr/lib/webkitgtk-3.0-0/libexec/GtkLauncher
}}}

<<toBalaNotes "1">>



///%1
//%/
''參考文章''
1. 初探 Hadoop 開放原始碼平台環境 (很棒的文章, 一定要看, 文章開頭對雲端有詳細說明)
http://www.runpc.com.tw/content/cloud_content.aspx?id=105318

{{item1{雲端基本精神 - 網路服務}}}
@@color:red;font-size:16px;line-height:140%;
''雲端運算就是 - 「將電腦運算與資料儲存工作都放到網路上處理」'', ''雲端運算''之所以能融入各行各業,憑藉的是「形成各類網路服務」這個基本精神。惟有形成服務,才有商業行為 ($$$),方能串起供需鏈,進而促進經濟的永續發展。
@@

下文網址 : http://searchcloudcomputing.techtarget.com/sDefinition/0,,sid201_gci1380302,00.html

''XaaS'' is a collective term said to stand for a number of things including ''X as a service, anything as a service'' or ''everything as a service.'' The acronym refers to an increasing number of services that are delivered over the ''Internet'' rather than provided ''locally or on-site''. ''XaaS'' is the essence (精髓) of cloud computing.

The most common examples of  ''XaaS'' are Software as a Service (''SaaS''), Infrastructure as a Service (''IaaS'') and Platform as a Service (''PaaS''). The combined use of these three is sometimes referred to as the ''SPI model (SaaS, PaaS, IaaS)''. Other examples of XaaS include storage as a service (''SaaS''), communications as a service (''CaaS''), network as a service (''NaaS'') and monitoring as a service (''MaaS'').

Following the convention of pronouncing "SaaS" as "sass," "XaaS" is sometimes pronounced as "zass." 

<html><img src="img/xaas/XaaSTree.png" width="100%" height="100%" /> </html>

''[註]'' X 代表空格, 可代入任何字

{{item1{雲端產業}}}
雲端造就兩個新興產業,一是 ''雲(資料中心)'' 的產業供應鏈,二是 ''端(行動裝置)'' 的產業供應鏈。''雲'' 的發展將是地理集中而標準化,而 ''端'' 的發展將是移動與隨身而多元化。

{{item1{雲端服務模式 - SPI (SaaS, PaaS, IaaS) Model}}}
雲端服務商的市場區隔,分別為:(1)架構即服務(Infrastructure as a Service,IaaS)、(2)平台即服務(Platform as a Service,PaaS)與(3)軟體即服務(Software as a Service,SaaS)。Amazon EC2 的服務就是典型的 ''架構即服務'',讓用戶可自行租賃所需的運算、儲存與網路資源。而 ''Google App Engine'' 則是典型的 ''平台即服務'',讓用戶可以在共通的平台上進行程式開發或軟體佈署。大家常用的 ''Yahoo、Gmail、Facebook'' 等都算是 ''軟體即服務'' 的範疇,過去購買套裝軟體的模式,如今已逐漸典範轉移,改依需求變化,動態地變更租賃軟體服務的授權數量,用多少付多少。從企業經營成本面來看,過去採購硬體的固定成本,如今轉變成 ''變動營運成本'',在金流調度上將更有彈性。

<html><img src="img/xaas/XaaSV1.png" width="100%" height="100%" /> </html>

''1. IaaS 架構即服務應用範例'' : Ubuntu 所建置的 Ubuntu One, 硬體系統是建構在 Amazon EC2, 軟體開發平台使用 [[CouchDB|http://couchdb.apache.org/]]

''[[2. 台灣雲端運算|http://www.facebook.com/taiwancloudcomputing]]''
有人去訪問了 Facebook 的 CTO Bret Taylor 而他說,現在回頭看起來,當初創業 (FriendFeed) 的時候沒有採用 ''AWS'' 而決定自己買伺服器來架是一大錯誤,因為花時間和心力去維護那些機器實在太累人了。

{{item1{雲端佈署}}}
雲端佈署可分為 ''1.公用雲端 (Public Cloud)'', ''2. 私有雲端 (Private Cloud)'',''3. 混合雲端 (Hybrid Cloud) '' 及不常聽到的 ''4. 社群雲端 (Community Cloud)''。''公用雲端'' 顧名思義就是全球一起共用某個雲端服務廠商的資源池,多半適用於 ''資料機密性低'' 的新創事業或中小企業;而 ''私有雲端'' 則是由企業自行建置共享資源池,故任何資料 ''機密性高'' 的產業都可能需要自行打造。''混合雲端'' 泛指混合了上述二種雲端型態,須基於標準或運用新技術來確保資料與應用程式在不同平台間的移植性。

{{item1{雲端基本特徵}}}
雲端特徵有''(1)隨需自助服務(On-demand self-service)、(2)隨時隨地用任何網路裝置存取(Broad network access)、(3)多人共享資源池(Resource pooling)、(4)快速重新佈署靈活度(Rapid elasticity),和(5)被監控與量測的服務(Measured Service)''。

這五個基礎特徵的典型代表當然是 Amazon Web Service。專家們認為未來進入雲端運算的時代,每個系統管理員將不再需要擔憂採購硬體的時間與流程,而是改以租賃方式,動態地根據需求,自助啟動或關閉所負責的資訊服務,而這些服務都是透過各種網路裝置來存取。此外,由於全球不同企業共享這些軟硬體甚至人力資源,從統計與機率上,若產業異質性高,便能有效地互補彼此尖峰使用的時間,間接降低了不同企業的資訊成本。當然為了達成動態調配共享資源的目的,雲端服務供應商必須具備快速重新佈署的靈活度,好讓各種被釋放出來的資源能馬上被其他客戶使用。最後,任何雲端服務都涉及計價行為,因此所有服務必須能被監控與量測,好統計租賃的帳單金額。

<<toBalaNotes "1">>

{{item1{雲端運算改變生活文化與產業型態}}}
本文網址 : http://www.zdnet.com.tw/news/hardware/0,2000085676,20148241,00.htm

對一般使用者而言,雲端運算是與資料中心毫無關連的詞彙。儘管雲端對一般人而言毫無感覺,但是它卻對運算、企業、甚至社交環境造成深遠的影響。

由微軟的策略團隊最近釋出的一份白皮書 The Economics of the Cloud,闡述了這些改變的經濟基礎。雲端運算提供了可能對文化造成衝擊的規模經濟,例如與 Facebook 及 Twitter 相關的文化。而且,當訂閱式軟體的價格與雲端結合,將會對與 IT 廠商、服務供應商、甚至 IT 環境相關的商業模式造成中斷。

經濟是打造產業轉型的強而有力的驅動力。雲端服務的出現對整個 IT 經濟再度造成基礎的改變。雲端技術讓 IT 資源群聚與標準化,並且讓許多目前以人工進行的維護工作得以自動化。

許多 IT 領導人面臨著一個問題,那就是 80% 的預算是花在維護現有的服務與基礎架構之上。使得能夠用來創新的資源不足,或是無法滿足不斷發生的新商業與使用者要求。雲端運算將可以釋放重要的資源,將這些資源重新導引到創新之上。

{{item1{雲端觀點,Self-Service+On-Demand 是雲端服務重點}}}
本文網址 : http://www.dotblogs.com.tw/jimmyyu/archive/2010/06/06/self-service-and-on-demand-cloud-computing.aspx
原文網址 : [[The Competitive Threat of Public Clouds|http://www.datacenterknowledge.com/archives/2010/06/04/the-competitive-threat-of-public-clouds/]]

Rodrigo Flores 在上面這篇文章中提到三個很發人省思的論點,看完之後覺得挺有意思的,分別是以下三項:
{{op1{Give up the fight}}}
IT Staff 應該撇開過去負責維持 IT 營運的角色,轉換成 IT 資源與企業營運的中介(Broker),過去 IT Staff 負責讓機器運轉好、讓系統正常執行,現在他們可以有更多的時間思考如何讓企業營運更加敏捷,如何資訊系統更貼近使用者所要的,拉高層級開始構想如何讓企業更好,而不只著眼在如何讓機器運轉正常。

{{op1{Define your Model T Ford (Model T Ford 的意思可以參考 Wiki)}}}
這邊意指的是先制定出一個標準與範本(Model T),有了標準後,依此範本來發展服務,因為少了標準與範本,全部的營運細節都要完全客製化(Custom tailored),那對於預估所需資源、成本都是不踏實的。

{{op1{Think like an ATM}}}
這邊以 ATM 為例,說明了何謂自我服務(Self-Service),他說雖然銀行都有櫃台,但很多人還是會選擇在 ATM 交易,主要的原因還是便利,如果我們提供的服務也能讓消費者有相同的應用方式,那會是多美好的一件事情,消費者能自己找尋、訂閱、追蹤、管理、使用這樣的服務,過程中都不需要有其他人參與,Self- Service 讓使用者自己決定他想做些甚麼,ATM 的概念,透過適當的語音導引,簡易的畫面,讓消費者可以很輕易的上手使用,他想要什麼服務,他自己選,自己執行,而不用填寫單子後交給櫃員,由櫃員幫他提款、查詢餘額,這整個過程,他都在自我服務。

以上三個論點都很有意思,但我最感興趣的還是 Self-Service 這部分,Self-Service+On-Demand 的概念,大概就可以詮釋了Cloud Service 的意義,這也跟過去 On-Premise 的模式有很大的區別,過去我們將系統裝在客戶端,要修改什麼設定、要增加授權、要擴充Server 數、要提高頻寬等,都需要一一改變基礎建設或服務,而且 IT Staff 也要隨伺在側,確認調整後一切是運作正常的,頗讓人感到戰戰兢兢,花費的人力、成本與衍生的風險都是可觀的;而 Cloud 的概念則用以補足這部分的缺憾,將這些需求標準化後,由 Cloud Providers(IaaS、PaaS、SaaS) 提供服務,消費者只要透過簡易的介面就可以達到類似 ATM 操作的效果,在Cloud Providers的SLA(Service Level Agreement)下,IT Staff可以放心的執行這些服務(但還是要了解這是要收錢的),讓 IT Staff 專注更重要的事,是很有力的訴求,只要 Service 確實可靠。

過去我們做系統開發,會提供很多層級的客製化,包含系統參數、個人化參數、客製程式等等,但在 Cloud 上,客製化是必要的,但我們需要更多方的思考,如何讓使用者能 Self-Service,並發揮 On-Demand 的真正優勢。 
<<toBalaNotes "2">>

{{item1{The Everything as a Service (XaaS) Model}}}
本文網址 : http://www.linux-mag.com/id/7197

Almost everyone knows the term Software as a Service (SaaS) and that it refers to an Application Service Provider (ASP) that hosts software which can be accessed from anywhere. It isn’t common knowledge, however, that there are several other traditionally localized services that are now being managed by Cloud vendors and ASPs. This practice is now so commonplace that marketing and technical types alike are simply referring to this myriad of leveraged services as Everything as a Service (XaaS).

''SaaS is certainly the oldest of the “as a Service” (aaS) models and the most widely accepted''. In its infancy, a decade or so ago, SaaS was as simple as using a subscription-based, industry-specific search tool such as Westlaw. Nowadays, SaaS companies such as Salesforce.com offer a world-class Customer Relationship Management (CRM) package are virtually (no pun intended) indistinguishable from their product.

Other aaS models include Communication, Infrastructure, Monitoring, and Platform. The common thread amongst all these is the “on-demand” nature of their offerings. On-demand models work and are nothing new to businesses. Mainframe time is still being sold as an on-demand service as it has been for the past 40-plus years.

The on-demand model works well because it is based on usage — if you use the service for 2 hours per month, you only pay for 2 hours of usage. As your business experiences ebbs and flows, your service usage reflects those changes and so will your bill for those services.

''Communication as a Service (CaaS)'' is a generic term for several different but related services. Under the broad CaaS umbrella, you have Voice Over IP (VoIP also sometimes referred to as Voice as a Service (VaaS)), remote automated call distribution (ACD), hosted Private Branch Exchange (PBX), and others. Be aware though that CaaS is not as mature as SaaS and therefore the quality is sometimes less than desirable. However, if you’re looking for inexpensive ways to use high-end telephony, CaaS is a very competitive option as Skype users will tell you.

''Infrastructure as a Service (IaaS)'' is a favorite news topic and the focal point of much buzz these days. The finest example of IaaS is Amazon’s EC2 (Elastic Compute Cloud) services. Virtualized servers and their resources (CPU, Memory, and Disk space) describe the basic components of IaaS. Those resources are dynamically allocated and scaled based on usage.

IaaS also includes Desktop computing infrastructure that replace traditional desktops with virtualized ones.

One of the more interesting aaS offerings is ''Monitoring as a Service (MaaS)'' because you can keep your localized infrastructure and setup inexpensive monitoring for your systems and services. You also have total control over which devices are monitored, polling intervals, and monitoring methods (ping, HTTP GET, etc.). You can also setup email, SMS, and other notifications for failed services through the vendor’s control panel software.

Platform as a Service (PaaS) is the newest entry into the as a Service fray. The best example of PaaS that I’ve seen is Force.com’s PaaS. Sign up as a Developer and take the tour. Their PaaS offering encompasses a whole range of other services including User Interface, Logic, Integration, and Database; all as services. Force.com is brought to you by Salesforce.com and is a very impressive selection of services and tools for leveraging Cloud Computing resources for your applications. Spend some time checking it out.

Cloud Computing Service Models

* Communication (CaaS : 卡死)
* Infrastructure (IaaS : 愛死)
* Monitoring (MaaS : 罵死)
* Platform (PaaS : 怕死)
* Software (SaaS : 嚇死)

''One of the greatest advantages of the on-demand and aaS model is that it creates an even playing field on which small companies can compete with larger ones''. Smaller enterprises leveraging these on-demand services can compete with larger, well-established businesses using a fraction of the initial cash outlay ordinarily required to purchase hardware, software, and to hire the experienced personnel to setup and maintain those services.

Another advantage is that you can buy in to a single service or a complete array to create your own virtual company. Where does your company exist? Everywhere.

Everything as a Service is a good idea and not just in theory — XaaS can help your business go toe-to-toe with the big guys with very little up-front cash and minimal investment in time to get started. Where will you be tomorrow — still plugging and tugging at traditional hardware and software resources or embracing the future with XaaS?

{{item1{坐看風雲險中生}}}
本文網址 (寫的很好, 上述內容參考本文) : http://www.hadoop.tw/2010/07/trend-of-cloud-and-the-rising-hadoop.html

隨著網際網路的普及,各種連網裝置讓我們的生活更加地便利。然而隨著人們產生資訊的速度越來越快,數量愈來愈多,大量通訊不僅塞爆了實體的網路頻寬,無形中也正侵蝕著你我大腦的資訊頻寬。為了解決資訊爆炸所造成的困擾,順應隨時隨地連網的趨勢,替企業達成精簡資訊成本的目標,一種嶄新的軟體服務模式與資訊架構革命正以撲天蓋地之姿席捲全球,Google執行長施密特(Eric Schmidt)於2006年8月9日SES'06會議中首次使用「雲端運算(Cloud Computing)」來形容無所不在的網路服務,緊接著Amazon更以 Elastic Compute Cloud(簡稱EC2)命名其虛擬運算資源服務。2007年10月8 日,Google 與 IBM 發佈合作新聞稿,擬於美國境內七所大學教授如何處理海量資料的高速計算技術,從此引爆雲端運算的討論熱潮。 2008年時值全球金融風暴肆虐,多數人均擔心雲端運算只是一時的口號。2009 年 4 月 24 日,美國國家標準局(NIST)起草雲端運算的標準定義;於同年5月,歐巴馬政府更明白地在2010年度國會預算書上,指出雲端運算是重要施政重點[5]。隨即,英國、日本、韓國等亦相繼宣佈將建立政府雲端的計畫,作為振興經濟的強心針。2010 年被媒體喻為台灣的雲端元年,但每當談到雲端運算,不知大家是否心中仍存著諸多疑惑,冷眼看那不同廠商各說各話。底下就讓我們細說從頭,先給大家一個關於雲端運算的正式定義、看看雲端的技術演進史,最後說明為何 Hadoop 這個處理海量資料的軟體平台會逐漸被國際資訊大廠所重視。

''笑談雲端五四三''

漫畫裡的呆伯特對老闆突然丟來的奇怪問題,總能用簡單的一句話敷衍過關。那麼如果要您用一句話簡單說明什麼是雲端運算,那該如何解釋呢?且讓小弟不聰贈送各位正經版與幽默版各一句來當作應對妙招,正經版可答道「雲端運算就是隨時隨地運用各種裝置來存取想要的服務」,幽默版可笑曰「雲端運算談的不外乎五四三(台語)啦~」倘若有人反譏您胡說八道,還有雲端口訣可以背得對方聽:「一個基本精神、兩個極端產業、三個服務模式、四個佈署模型、五個基礎特徵」,這不恰恰就是五四三二一嘛?!關於這雲端口訣的五四三可不是隨便說說,真是取自美國國家標準局(NIST)的雲端標準定義呢[6]!以下為各位看倌逐一說個明白。

所謂「五個基礎特徵」,包括:(1)隨需自助服務(On-demand self-service)、(2)隨時隨地用任何網路裝置存取(Broad network access)、(3)多人共享資源池(Resource pooling)、(4)快速重新佈署靈活度(Rapid elasticity),和(5)被監控與量測的服務(Measured Service)。這五個基礎特徵的典型代表當然是 Amazon Web Service。專家們認為未來進入雲端運算的時代,每個系統管理員將不再需要擔憂採購硬體的時間與流程,而是改以租賃方式,動態地根據需求,自助啟動或關閉所負責的資訊服務,而這些服務都是透過各種網路裝置來存取。此外,由於全球不同企業共享這些軟硬體甚至人力資源,從統計與機率上,若產業異質性高,便能有效地互補彼此尖峰使用的時間,間接降低了不同企業的資訊成本。當然為了達成動態調配共享資源的目的,雲端服務供應商必須具備快速重新佈署的靈活度,好讓各種被釋放出來的資源能馬上被其他客戶使用。最後,任何雲端服務都涉及計價行為,因此所有服務必須能被監控與量測,好統計租賃的帳單金額。

至於「四個佈署模型」,指的是(1)公用雲端(Public Cloud)、(2)私有雲端(Private Cloud),及不常聽到的(3)社群雲端(Community Cloud)、(4)混合雲端(Hybrid Cloud)。公用雲端顧名思義就是全球一起共用某個雲端服務廠商的資源池,多半適用於資料機敏性低的新創事業或中小企業;而私有雲端則是由企業自行建置共享資源池,故任何資料機敏性高的產業都可能需要自行打造。社群雲端係指由多個組織共享資源池,故較合適具共享研究資料需求的學研單位合資建置;而混合雲端泛指混合了上述三種雲端型態,須基於標準或運用新技術來確保資料與應用程式在不同平台間的移植性。

關於「三個服務模式」,也可稱之為不同雲端服務商的市場區隔,分別為:(1)架構即服務(Infrastructure as a Service,IaaS)、(2)平台即服務(Platform as a Service,PaaS)與(3)軟體即服務(Software as a Service,SaaS)。Amazon EC2的服務就是典型的架構即服務,讓用戶可自行租賃所需的運算、儲存與網路資源。而Google App Engine則是典型的平台即服務,讓用戶可以在共通的平台上進行程式開發或軟體佈署。大家常用的Yahoo、Gmail、Facebook等都算是軟體即服務的範疇,過去購買套裝軟體的模式,如今已逐漸典範轉移,改依需求變化,動態地變更租賃軟體服務的授權數量,用多少付多少。從企業經營成本面來看,過去採購硬體的固定成本,如今轉變成變動營運成本,在金流調度上將更有彈性。

口訣最後的「兩個極端產業,一個基本精神」,談的是未來雲端將持續往兩個極端發展,一邊是雲(資料中心)的產業供應鏈,一邊是端(行動裝置)的產業供應鏈。雲的發展將是地理集中而標準化,而端的發展將是位置變異而多元化。而雲端運算之所以能融入各行各業,憑藉的是「形成網路服務」這個基本精神。惟有形成服務,才有商業行為,方能串起供需鏈,進而促進經濟的永續發展。

''回顧資通六十載''

談完雲端的五四三,相信各位已對雲端運算有比較清晰的概念。那麼雲端運算代表的是全新技術嗎?其實不然,雲端運算只是軟體演化必然的趨勢。讓我們一同回顧資通產業過去六十年的發展:1960年代運算主力是超級電腦,1970年代才有個人電腦誕生,1980年代制訂 TCP/IP 通訊協定,1990 年初期因為有 GNU 自由軟體基金會與 Linus 的貢獻,才產生第一次高速運算革命,讓個人電腦叢集(PC Cluster)以企鵝雄兵打敗超級電腦,大幅降低運算成本,平行運算(Parallel  Computing)頓時蔚為顯學。1990年代後期網路逐漸導入金融機構,分散式物件技術才逐漸流行。演化至2000年初,SETI@Home尋找外星人計畫,向全球電腦用戶借螢幕保護的運算資源,開啟分散式運算(Distributed Computing)的新頁。隨即全球高速電腦中心起而效法分散式運算精神,提倡格網運算(Grid Computing),擬能建立虛擬組織共享全球運算資源。格網運算雖立意良善,卻因遲遲無法形成服務而難以進入產業應用。直至2006 年 Amazon 推出 Elastic Compute Cloud(EC2)才成功地建立隨需運算服務,開啟雲端運算的序曲。

歷經超級電腦、平行運算、分散運算與格網運算,雲端運算繼承了平行運算的叢集技術、融合了分散運算的容錯特性,發展出「資料中心即電腦(Data Center as a Computer)」的新思維。有別於格網運算將資料搬到有空閒計算資源的思維,雲端運算強調把工作丟到資料所在主機去計算。在雲端的年代,資訊人惟有不斷充實多核心、平行、分散式運算與網頁程式設計等技能,才有機會住進未來那朵大雲裡。

''喜獲吉象過重山''

看完以上的趨勢分析,或許您不禁想問:「縱使知道雲端趨勢很重要,若想打造私有雲端,下一步該做什麼呢?總是得腳踏實地,而不是空口畫雲吧~」若您想打造私有雲端,可參考如下圖中所列舉不同技術分層的自由軟體,來建置您專屬的雲端服務。目前有專家學者警告企業導入雲端不該只是採購硬體或導入虛擬化。小弟認為這只是導入雲端第一個階段,後續將視各企業屬性不同而導入平台即服務與軟體即服務的相關技術。倘若您的企業需要架構即服務,擬導入虛擬層與控制層,這裡建議的自由軟體包括:Xen、KVM、OpenNebula 與 Eucalyptus 這幾套。若您需要建置的是資料分析的平台服務,那接下來要介紹的 Hadoop 與美國資料探勘中心研發的 Sector/Sphere 都是首選。

本文最後要談的是 Hadoop 這個海量資料分析的軟體平台,它有何特殊呢?為何 Google 與 IBM 合作教授雲端課程採用它?為何Yahoo 搜尋引擎、 Facebook 社交網站、Last.fm 網路電台、Joost 網路電視等均採用它來進行資料分析呢?首先,倘若說 Google是當今最能引導技術風潮的品牌,想必大家不得不同意。那麼 Google 的關鍵技術是什麼呢?Google 有三大關鍵技術:其一是Google File System,為了提升營運容錯率,Google 重新設計了一套分散式檔案系統來存放全球的網頁內容、電子郵件、照片、影片等;其二是 MapReduce 演算法,Google 宣稱多數的資料分析都可以靠此一演算法解決;其三是 BigTable 分散式資料庫,Google 將存在 Google File System 中的資料,經過 MapReduce 運算後,轉換成巨大的資料表,這個資料表有別於傳統關聯式資料庫,僅存放成對的 Key-Value。因此,當您下達一個查詢網頁或查詢信件的動作前,Google早已將所有網頁、信件進行拆解分類存進 BigTable,查詢下達時只是從 BigTable 查 Key等於關鍵字對應的所有 Value罷了。

然而 Google 的三大關鍵技術沒有原始碼或軟體可讓其他企業安裝使用,好在 Google 佛心來著,分別公開了Google File System、MapReduce 與 BigTable 的三篇論文,讓 Hadoop 這套軟體平台的創始人Doug Cutting 能參考論文,重新實作了Hadoop Distributed File System(HDFS)與 MapReduce API。諸多過去無法分析的資料,如今有了 Hadoop 軟體平台後,都開始可以分析了。例如:電信業者如今可以分析手機在基地台漫遊的特性,提供更好的在地費率。信用卡業者如今可以每天定期分析各種信用貸款所產生的風險,動態調整信貸利率。便利超商如今可以分析消費者的購買習慣,動態調整架上存貨數量。甚至新興產業,如生醫資訊、智慧電網等都可以運用 Hadoop 平台來進行資料探勘與趨勢預測。若您的企業有儲存海量資料的需求,有分析海量資料的需求,或者面臨資料庫過度龐大,正在尋求分散式資料庫或資料倉儲的技術,那Hadoop或許是一個已通過商業驗證的選擇。只要懂得駕馭 Hadoop 這隻大象,它將能駝著您爬過一座座資料大山,幫您犁出隱藏在群眾中的邏輯。

<<toBalaNotes "3">> 


///%1
其實回歸原點還是要看樓主您的需求才能比較,我現在都用日常交通工具的方式來說明雲端這件事:
1.買主機放在公司就像自己買車:雖然一次付出比較多錢,壞了要花錢修,但想去那就去那,自由度高。
2.IaaS就像租車:一次付出的比自己買主機少的多,也不用管維修,自由度也算高,但缺點是長期用或用量高不一定便宜。
3.PaaS像搭計程車:自由度比租車少一點,但優點是有司機開車,讓您更省點事,但一樣依使用量算錢。
4.SaaS就像搭大眾運輸系統:自由度最少,但優點就是便宜,上面跑的就是套裝軟體,沒有客製的空間。//%/

///%2
//%/

///%3
//%/

''參考文章''
1. 伺服器安裝與設定 - Apache HTTP Server
http://linux.nchc.org.tw/intro_to_linux/part4/apache.html

{{item1{更新套件清單}}}
{{{
$ sudo apt-get update
}}}

{{item1{Apache 2 安裝}}}

''$ sudo apt-get install apache2''
{{{
[sudo] password for student:
正在讀取套件清單... 完成
正在重建相依關係
正在讀取狀態資料... 完成
以下套件是被自動安裝進來的,且已不再會被用到了:
  linux-headers-2.6.32-21 linux-headers-2.6.32-22
  linux-headers-2.6.32-22-generic-pae
使用 'apt-get autoremove' 來將其移除。
下列的額外套件將被安裝:
  apache2-mpm-worker apache2-utils apache2.2-bin apache2.2-common libapr1
  libaprutil1 libaprutil1-dbd-sqlite3 libaprutil1-ldap
建議套件:
  apache2-doc apache2-suexec apache2-suexec-custom
下列【新】套件將會被安裝:
  apache2 apache2-mpm-worker apache2-utils apache2.2-bin apache2.2-common
  libapr1 libaprutil1 libaprutil1-dbd-sqlite3 libaprutil1-ldap
升級 0 個,新安裝 9 個,移除 0 個,有 1 個未被升級。
需要下載 3,328kB 的套件檔。
此操作完成之後,會多佔用 10.1MB 的磁碟空間。
是否繼續進行 [Y/n]?y
下載:1 http://tw.archive.ubuntu.com/ubuntu/ lucid/main libapr1 1.3.8-1build1 [116kB]
下載:2 http://tw.archive.ubuntu.com/ubuntu/ lucid/main libaprutil1 1.3.9+dfsg-3build1 [85.4kB]
下載:3 http://tw.archive.ubuntu.com/ubuntu/ lucid/main libaprutil1-dbd-sqlite3 1.3.9+dfsg-3build1 [27.1kB]
下載:4 http://tw.archive.ubuntu.com/ubuntu/ lucid/main libaprutil1-ldap 1.3.9+dfsg-3build1 [25.1kB]
下載:5 http://tw.archive.ubuntu.com/ubuntu/ lucid/main apache2.2-bin 2.2.14-5ubuntu8 [2,622kB]
下載:6 http://tw.archive.ubuntu.com/ubuntu/ lucid/main apache2-utils 2.2.14-5ubuntu8 [159kB]
下載:7 http://tw.archive.ubuntu.com/ubuntu/ lucid/main apache2.2-common 2.2.14-5ubuntu8 [290kB]
下載:8 http://tw.archive.ubuntu.com/ubuntu/ lucid/main apache2-mpm-worker 2.2.14-5ubuntu8 [2,364B]
下載:9 http://tw.archive.ubuntu.com/ubuntu/ lucid/main apache2 2.2.14-5ubuntu8 [1,482B]
取得 3,328kB 用了 2s (1,345kB/s)
選取了原先未被選取的套件 libapr1。
(正在讀取資料庫 ... 系統目前共安裝了 159384 個檔案和目錄。)
正在解開 libapr1 (從 .../libapr1_1.3.8-1build1_i386.deb)...
選取了原先未被選取的套件 libaprutil1。
正在解開 libaprutil1 (從 .../libaprutil1_1.3.9+dfsg-3build1_i386.deb)...
選取了原先未被選取的套件 libaprutil1-dbd-sqlite3。
正在解開 libaprutil1-dbd-sqlite3 (從 .../libaprutil1-dbd-sqlite3_1.3.9+dfsg-3build1_i386.deb)...
選取了原先未被選取的套件 libaprutil1-ldap。
正在解開 libaprutil1-ldap (從 .../libaprutil1-ldap_1.3.9+dfsg-3build1_i386.deb)...
選取了原先未被選取的套件 apache2.2-bin。
正在解開 apache2.2-bin (從 .../apache2.2-bin_2.2.14-5ubuntu8_i386.deb)...
選取了原先未被選取的套件 apache2-utils。
正在解開 apache2-utils (從 .../apache2-utils_2.2.14-5ubuntu8_i386.deb)...
選取了原先未被選取的套件 apache2.2-common。
正在解開 apache2.2-common (從 .../apache2.2-common_2.2.14-5ubuntu8_i386.deb)...
選取了原先未被選取的套件 apache2-mpm-worker。
正在解開 apache2-mpm-worker (從 .../apache2-mpm-worker_2.2.14-5ubuntu8_i386.deb)...
選取了原先未被選取的套件 apache2。
正在解開 apache2 (從 .../apache2_2.2.14-5ubuntu8_i386.deb)...
正在進行 man-db 的觸發程式 ...
正在進行 ufw 的觸發程式 ...
正在進行 ureadahead 的觸發程式 ...
ureadahead will be reprofiled on next reboot
正在設定 libapr1 (1.3.8-1build1) ...

正在設定 libaprutil1 (1.3.9+dfsg-3build1) ...

正在設定 libaprutil1-dbd-sqlite3 (1.3.9+dfsg-3build1) ...
正在設定 libaprutil1-ldap (1.3.9+dfsg-3build1) ...
正在設定 apache2.2-bin (2.2.14-5ubuntu8) ...
正在設定 apache2-utils (2.2.14-5ubuntu8) ...
正在設定 apache2.2-common (2.2.14-5ubuntu8) ...
Enabling site default.
Enabling module alias.
Enabling module autoindex.
Enabling module dir.
Enabling module env.
Enabling module mime.
Enabling module negotiation.
Enabling module setenvif.
Enabling module status.
Enabling module auth_basic.
Enabling module deflate.
Enabling module authz_default.
Enabling module authz_user.
Enabling module authz_groupfile.
Enabling module authn_file.
Enabling module authz_host.
Enabling module reqtimeout.

正在設定 apache2-mpm-worker (2.2.14-5ubuntu8) ...
 * Starting web server apache2                                                  
apache2: Could not reliably determine the server's fully qualified domain name, using 127.0.1.1 for ServerName
                                                                         [ OK ]
正在設定 apache2 (2.2.14-5ubuntu8) ...

正在進行 libc-bin 的觸發程式 ...
ldconfig deferred processing now taking place
}}}

''@@color:red;[問題]@@'' 由上述安裝資訊, Apache 2 已啟動那些 Module ?

{{item1{檢視 Apache 2 的內部設定值}}}

''$ /usr/sbin/apache2 -V''
{{{
Server version: Apache/2.2.14 (Ubuntu)
Server built:   Apr 13 2010 19:29:28
Server's Module Magic Number: 20051115:23
Server loaded:  APR 1.3.8, APR-Util 1.3.9
Compiled using: APR 1.3.8, APR-Util 1.3.9
Architecture:   32-bit
Server MPM:     Worker
  threaded:     yes (fixed thread count)
    forked:     yes (variable process count)
Server compiled with....
 -D APACHE_MPM_DIR="server/mpm/worker"
 -D APR_HAS_SENDFILE
 -D APR_HAS_MMAP
 -D APR_HAVE_IPV6 (IPv4-mapped addresses enabled)
 -D APR_USE_SYSVSEM_SERIALIZE
 -D APR_USE_PTHREAD_SERIALIZE
 -D SINGLE_LISTEN_UNSERIALIZED_ACCEPT
 -D APR_HAS_OTHER_CHILD
 -D AP_HAVE_RELIABLE_PIPED_LOGS
 -D DYNAMIC_MODULE_LIMIT=128
 -D HTTPD_ROOT=""
 -D SUEXEC_BIN="/usr/lib/apache2/suexec"
 -D DEFAULT_PIDLOG="/var/run/apache2.pid"
 -D DEFAULT_SCOREBOARD="logs/apache_runtime_status"
 -D DEFAULT_ERRORLOG="logs/error_log"
 -D AP_TYPES_CONFIG_FILE="/etc/apache2/mime.types"
 -D SERVER_CONFIG_FILE="/etc/apache2/apache2.conf"
}}}

@@color:red;''[註]'' 由上面資訊中, 可以得知 Apache 2 的主要設定檔是 ''/etc/apache2/apache2.conf''@@

{{item1{停止 Apache 2}}}
{{{
$ sudo /etc/init.d/apache2 stop
 * Stopping web server apache2                                                   
... waiting                                                             [ OK ]
}}}

{{item1{啟動 Apache 2}}}
{{{
$ sudo /etc/init.d/apache2 start
 * Starting web server apache2                                           [ OK ]
}}}

{{item1{重新啟動 Apache 2}}}
{{{
$ sudo /etc/init.d/apache2 restart
}}}

{{item1{Troubleshooting Apache}}}

在啟動或重新啟動, 如出現以下錯誤訊息 : 
{{{
$ sudo /etc/init.d/apache2 restart
[sudo] password for student:
 * Restarting web server apache2                                                
apache2: Could not reliably determine the server's fully qualified domain name, using 127.0.1.1 for ServerName
 ... waiting apache2: Could not reliably determine the server's fully qualified domain name, 
using 127.0.1.1 for ServerName                                   [ OK ]
}}}
then use a text editor such as ''nano'' to create a new file
{{{
$ sudo nano /etc/apache2/conf.d/fqdn
}}}
then add
{{{
ServerName localhost
}}}
to the file and save. This can all be done in a single command with the following:
{{{
echo "ServerName localhost" | sudo tee /etc/apache2/conf.d/fqdn
}}}

''再一次啟動 Apache 2''
{{{
$ sudo /etc/init.d/apache2 restart
 * Restarting web server apache2                                                
 ... waiting                                                             [ OK ]
}}}

<<toBalaNotes "apache2">>


///%apache2
//%/



Google 維基百科 : http://zh.wikipedia.org/zh-tw/Google

Google搜尋項目是由兩名史丹佛大學的理學博士生拉里·佩奇和謝爾蓋·布林在1996年早期建立的,他們開發了一個對網站之間的關聯做精確分析的搜尋引擎,此搜尋引擎的精確度勝於當時使用的基本搜尋技術。當時項目被稱作BackRub,因為系統會檢查backlinks(反向鏈結),以評估站點的重要性。

由於深信從其他高相關網站得到最多鏈結的網頁一定是最有關的頁面,佩奇,Hubert Chang和布林決定把這作為他們研究的一部分進行測試,這為他們的搜尋引擎打下了基礎。他們正式在1998年9 月7日在位於加州門洛帕克的朋友的車庫裡建立了谷歌公司。在稍後搬到“Googleplex” 之前,他們在1999年2月先搬進了加州的帕羅奧多大學街165號辦公。那裡是很多著名的矽谷新建立技術公司薈萃的地方。

Google搜尋引擎以它簡單,乾淨的頁面設計和最有關的搜尋結果贏得了網際網路使用者的認同。搜尋頁面裡頭的廣告以關鍵字的形式出售給廣告主。為了要使頁面設計不變而且快速,廣告以文字的形式出現。這種以關鍵字賣廣告概念本來是Overture[6]開發的(即原來的Goto.com)。當大部份的網路公司倒下時,Google則一直安靜地在穩步發展著並開始盈利。

2001 年9月,Google 的網頁評級機制PageRank被授予了美國專利。專利正式地被頒發給史丹佛大學,Lawrence Page作為發明人列於檔案中。

2003 年2月,Google 接管了部落格ger的所有者Pyra實驗室,一個主導網誌網路服務的先鋒。似乎這與Google的使命矛盾。然而,這實際上鞏固了公司從網誌發布改善Google新聞搜尋的速度和其搜尋相關性的能力。

2004 年初的一個最高峰時期,透過它的網站及其客戶網站如雅虎,美國線上和CNN,Google處理了全球資訊網上的80%的搜尋請求。[8]Google的份額在2004 年2月跌落一些,因為雅虎放棄了Google的搜尋技術, 決定獨力開發自己的搜尋引擎。

Google做事的行為準則是不作惡(Don't be evil),他們的站點時常包括富有幽默感的特徵,譬如他們的主頁面的Logo偶爾會於特定時日出現應景的變化(這樣的logo稱為doodle),還提供幻想或幽默的語言介面,如克林貢語(科幻電視劇《星艦奇航記》中克林貢人的語言)和Leet語,還有愚人節里公司的笑話。

很多人推測Google對於雅虎的反應是藉由從Orkut,Gmail和Froogle搜集到的個人資訊,Google下一步將會引入個性化搜尋,事實上,在Google實驗室網站裡,有一個個性化的搜尋測試頁面。

2005 年7月19日,Google在中國設立研發中心。

2005 年12月20日,Google宣布斥資10億美元收購網際網路服務供應商美國線上5%的股權。

2006 年4月12日,Google全球CEO埃里克·施密特在中國北京宣布Google的中文名字為「谷歌」,推出Google.cn。Google自此正式進入中國。

2006年10 月,Google以16.5億美元,收購影音內容分享網站YouTube,是Google有史以來最大筆的併購。

2010年1月,Google谷歌宣告可能結束中國大陸業務,起因於中國大陸網路人士對谷歌發動網路攻擊。但後來公司又將「中國大陸嚴苛的網路審查制度」作為其結束大陸市場的理由。

北京時間2010年3月23日凌晨,谷歌將其中國內地網站Google.cn關閉。針對該網站的搜尋自動轉向谷歌香港網站 Google.com.hk,並不再過濾搜尋內容。但是谷歌手機版ditu.google.cn仍然對搜尋結果進行過濾。
Google的logo去掉陰影前(上)和後(下)的設計

2010年4月3日,Google將「Google谷歌」這個名稱移除,改為「Google中國」。

2010年4月4日,Google.com.hk將Logo名稱改回「Google谷歌」。

2010年5月5日,Google開始大面積啟用新的UI JAZZ搜尋介面,Google的logo標誌也有所改變,去掉了以前的陰影設計。

<<toBalaNotes "google">>


///%google
//%/
 Linux KVM 下是可以建立 Windows 的虛擬機的 , 目前在我的 proxmox ve 下就可以建立 XP 及 Windows 2003/2008 , 但有一些重點是要和大家分享的

我想大部分有玩過 Vmware 系列產品的人應該知道 Vmware 有 vmware tools 可以安裝 , vmware tools 內就有一些 driver 可增進效能 , 而在 KVM 的世界中有個叫 virtio 的設備 , 例如 virtio 網卡 , virtio 的硬碟 , 有的 OS 有支援 , 例如我測試過 CentOS 5.4 就有該網卡與硬碟的 driver , 而在 Windows 下則沒有 !

但好家在 , RedHat 很有良心開發了 Windows 版本的 virtio driver , 目前 RedHat 所開發的 driver 支援性如下

    * Windows XP : 只有網卡有支援 32/64bit , 硬碟部分只支援到 32bit , 所以 XP 跑 32bit 版本最好
    * Windows 2003/2008/VISTA/Windows7 : 支援網卡及硬碟到 32/64 bit

能用 virtio 就盡量用 , 這對性能很有幫助 , 要下載 Windows 的 VirtIO Driver 請至下面連結可以找到最新版本

http://www.linux-kvm.org/page/WindowsGuestDrivers/Download_Drivers

而 Google 上可能會找到其他 RedHat 版本的 driver 可能有點舊了 , 甚至沒有 XP 的硬碟 Driver , 所以這個 kvm 官方網站上最新的就有支援 , 隨時可以到這個連結去看看有沒有最新版的 Driver , 搞不好以後有 XP 64bit 的硬碟 Driver


順帶一提 , 由於我的 Server 沒有 floppy , 所以安裝 Windows 2003 時 , 沒辦法直接裝 virtio 硬碟的 driver , 這樣就沒有辦法識別到硬碟 , 而 Windows 2008 的安裝程式有支援從 CD-ROM 載入驅動 , 可以在安裝期間切換 CD-ROM 到 Driver 的 ISO 檔 , 所以沒這問題 , 而且 proxmox ve 目前也沒有支援虛擬的 floppy driver , 所以我就用一種偷吃步來做

   1. 首先 , 在建立 KVM Win2003 時 , 仍是將網卡設定為 virtio , 硬碟是 IDE , 先不啟動虛擬機
   2. 進入 Hardware 新增一顆 1G 的 virtio HDD , 檢查一下 Options 裡頭第一顆開機碟是不是 IDE
   3. 啟動虛擬機 , 開始安裝 Windows 2003 , 此時安裝的時候 , 會裝在 IDE 那顆
   4. Windows 2003 裝好之後 , 回到 proxmox ve 的管理介面將 CD-ROM 切到 virtio driver 做成的 ISO 檔 , 此時再由 Windows 2003 中更新驅動
   5. 更新驅動程式後 , 應該硬體裝置管理員都有識別到 RedHat 的網卡及 SCSI 了 , 同時也會多出一顆硬碟 , 然後將 Windows 2003 關機
   6. 現在從 proxmox ve 管理介面將 virtio HDD 做 Delete , Delete 掉之後會發現 Unused disk images , 也就是說 raw 檔還在 , 做這個動作只是把裝置刪除 , 並不會把檔案刪除  , 要把檔案真正刪除 , 還要對該 raw 檔做 Remove From Disk
   7. 將 IDE HDD Delete 掉 , 但千萬不要做 Remove From Disk
   8. 現在應該沒有任何硬碟可以用 , 於是我們再新增一顆硬碟 , 管理介面中可以用現成的檔案當硬碟 Use an existing virtual disk
   9. 於是這次新增時候選 virtio 介面 , 並且使用原本的 RAW 建立好之後就可以重開虛擬機了

上述方式 , XP 也可以如法炮製 , 根據測試 , 在我的 Gigabit 網路下 , 透過網路芳鄰從 Win2003 VM 抓大檔到我的桌機 , 會有 40MBytes/s 的傳輸速度 , 這樣就等於至少有 400Mbps 的速度 , 這樣的表現非常好 , 若是用 e1000 的網卡及 IDE 硬碟設定 , 下載速度不到 20MB
''參考文章''
1. MeeGo手機版開放社群預覽
http://www.runpc.com.tw/news.aspx?id=100627


@@font-size:34px;[["Nokia 從 MeeGo 移情別戀到 Windows Phone7 殺的 Intel 措手不及"|http://chinese.engadget.com/2011/02/14/nokias-marginalization-of-meego-came-as-a-surprise-to-intel/]]@@


MeeGo 官方網址 : http://meego.com/

MeeGo 是一個基於 Linux 的行動作業系統計劃,它於2010年 2月的全球行動通訊大會中宣佈  ,主要的推動者為諾基亞與英特爾。

它將結合諾基亞 Maemo 中基於 Qt 的用戶介面及英特爾Moblin的核心軟體平台。 相容性方面,MeeGo將支援英特爾的 AppUp 及諾基亞的 Ovi 數位發行平台。 MeeGo 將同時支援 ARM 和 x86 的處理器。

''版本歷史''
Intel Developer Forum 2010 中宣佈,MeeGo 以六個月作為發佈周期。2010年5月26日開始提供 Netbook 版本的下載。 2010年5月27日,MeeGo 官方正式發布 v1.0 小筆電體驗版和 N900 版 MeeGo v1.0 Core Software 平台。

{{item1{MeeGo Software Architecture Overview}}}
本文網址 : http://meego.com/developers/meego-architecture

The MeeGo platform has been carefully created to provide the components necessary for the best device user experience. As shown in the MeeGo Reference Architecture Overview diagram below, the MeeGo architecture is divided into three layers:

* The MeeGo OS Base layer contains the Linux kernel and core services along with the Hardware Adaptation Software required to adapt MeeGo to support various hardware architectures.
* The MeeGo OS Middleware layer provides a hardware and usage model independent API for building both native applications and web run time applications.
* The MeeGo User Experience layer provides reference user experiences for multiple platform segments; the initial MeeGo release will contain reference user experiences for handhelds and netbooks and additional platform segments will be supported over time.

[img[img/MeeGoArch1.png]]

<<toBalaNotes "meego">>

''AMD 參與 MeeGo 計畫 (2010/11/17)''

AMD周一宣布該公司已經決定加入MeeGo公開程式碼Linux計畫,並將更深入的參與行動作業系統的開發。

依據AMD所說,該公司將提供工程和技術上的訣竅給予該計畫。MeeGo是一個次世代的行動作業系統。

AMD還提到,透過參與MeeGo計畫將有助於該公司即將推出的accelerated processing units (APUs)擴展新市場的契機。在上周該公司的分析師會議中,AMD宣布有關新的APUs與其對於行動運算的展望。

///%meego
//%/
''參考文章''
1. SettingUpNFSHowTo (Ubuntu 正式文件)
https://help.ubuntu.com/community/SettingUpNFSHowTo
2. GlusterFS (整合 Hadoop 的 NFS Server)
http://www.gluster.org/
2. Use Linux as a SAN Provider (這一篇寫的很清楚)
http://www.linuxjournal.com/magazine/use-linux-san-provider
3. Using iSCSI On Ubuntu 10.04 (Initiator And Target)
http://www.howtoforge.com/using-iscsi-on-ubuntu-10.04-initiator-and-target
4. using shared storage with libvirt/KVM?
http://permalink.gmane.org/gmane.comp.emulators.libvirt.user/252

{{item1{NFS Server 安裝與設定}}}
''1. 安裝 nfs-kernel-server 套件''
{{{
$ sudo apt-get install nfs-kernel-server
}}}

若用 apt-get 安裝,則會自動一起安裝 nfs-common 和 portmap 這二個套件

''2. 修改 /etc/exports 設定檔''
{{{
$ sudo nano /etc/exports
                 :
/home/student/nfs  *(rw,sync)  
}}}

代表將 /home/student/nfs 資料夾開放給所有(*)的client,可以讀寫(rw) 

''3. 啟動 NFS server''
{{{
$ sudo /etc/init.d/nfs-kernel-server start
}}}

''4. 檢查 NFS server 是否成功被啟動了''
{{{
$ sudo showmount -e localhost
}}}

{{item1{NFS Client 安裝與設定}}}
''1. 安裝 nfs-common 套件''
{{{
$ sudo apt-get install nfs-common
}}}

''2. 檢視 NFS server 共享路徑''
{{{
$ showmount -e nfs_server_ip
}}}

''3. 掛載 NFS Server 共享路徑''
先在 /mnt/ 目錄, 產生 nfs 目錄, 然後執行以下命令 
{{{
$ mount -t nfs  nfs_server_ip:共享路徑  /mnt
}}}

<<toBalaNotes "1">>

///%1
//%/

TurnKey 官方網站 : http://www.turnkeylinux.org/

{{item1{認識 TurnKey Core - Common Base for All Appliances}}}
This is the common base system on top of which all TurnKey Linux appliances are built, or in other words our appliances lowest common denominator. It includes the live installer, configuration console, web interface, automatic daily package updates and all other common features (and bugs). Take a look at some screenshots.

''下載網址 :'' http://www.turnkeylinux.org/core

{{item1{安裝 TurnKey Core 11}}}

[img[img/turnkey/TurnKeyCore11-ins01.png]]

[img[img/turnkey/TurnKeyCore11-ins02.png]]

[img[img/turnkey/TurnKeyCore11-ins03.png]]

[img[img/turnkey/TurnKeyCore11-ins04.png]]

[img[img/turnkey/TurnKeyCore11-ins05.png]]

[img[img/turnkey/TurnKeyCore11-ins06.png]]

<<toBalaNotes "1">>
{{item1{設定 TurnKey Core}}}

[img[img/turnkey/TurnKeyCore11-set01.png]]

[img[img/turnkey/TurnKeyCore11-set02.png]]

[img[img/turnkey/TurnKeyCore11-set03.png]]

[img[img/turnkey/TurnKeyCore11-set04.png]]

<<toBalaNotes "2">>

{{item1{網路設定}}}

[img[img/turnkey/TurnKeyCore11-set05.png]]

[img[img/turnkey/TurnKeyCore11-set06.png]]

[img[img/turnkey/TurnKeyCore11-set07.png]]

[img[img/turnkey/TurnKeyCore11-set08.png]]

''[註]'' TurnKey Core 11 版沒有提供 dhclient 命令, 以至使用選單中的 DHCP 項目, 無法正常從 DHCP Server 取得 IP 位址, 要解決此問題, 請在 TurnKey 系統安裝 dhcp-client 套件, 命令如下 :
{{{
$ sudo apt-get install dhcp-client
}}}

<<toBalaNotes "3">>

///%1
//%/

///%2
//%/

///%3
//%/
''參考文章''
1. Virt-Manager 0.8.4 Adds new UI Features
http://www.linux-kvm.com/content/virt-manager-084-adds-new-ui-features
2. KVM - The Linux Kernel-Based Virtual Machine : http://www.linux-kvm.com/
3. Virtual Machine Manager 官網 : http://virt-manager.et.redhat.com/

The ''Virtual Machine Manager'' application (virt-manager for short package name) is a desktop user interface for managing virtual machines. It presents a summary view of running domains, their live performance & resource utilization statistics. The detailed view graphs performance & utilization over time. Wizards enable the creation of new domains, and configuration & adjustment of a domain's resource allocation & virtual hardware.

{{item1{由 Ubuntu 套件庫安裝 (版本不會是最新)}}}
Install virt-manager on your desktop:

''1. 安裝 virt-manager 套件''
方法一 :  from a Command Line
{{{
$ sudo apt-get install virt-manager
[sudo] password for student:
正在讀取套件清單... 完成
正在重建相依關係
正在讀取狀態資料... 完成
下列的額外套件將被安裝:
  libgtk-vnc-1.0-0 python-glade2 python-gtk-vnc python-vte
建議套件:
  python-gtk2-doc hal python-guestfs python-spice-client-gtk
下列【新】套件將會被安裝:
  libgtk-vnc-1.0-0 python-glade2 python-gtk-vnc python-vte virt-manager
升級 0 個,新安裝 5 個,移除 0 個,有 6 個未被升級。
需要下載 420 kB 的套件檔。
此操作完成之後,會多佔用 3,865 kB 的磁碟空間。
是否繼續進行 [Y/n]?
}}}

方法二 :  from the Ubuntu Software Center:
{{{
* Applications -> Ubuntu Software Center -> Search for "virt-manager"
* Install "Virtual Machine Manager" 
}}}

''2. 啟動 虛擬機器管理員''

[img[img/kvm/kvmmenu.png]]

{{item1{設定使用 root 權限啟動 虛擬機器管理員}}}

[img[img/kvm/KVManager01.png]]

''在下圖中的 "指令" 欄位輸入 "gksudo virt-manager''

[img[img/kvm/KVManager02.png]]

''[註]'' 如要使用 ''登入帳號'' 啟動 虛擬機器管理員, 請將 ''登入帳號'' 加入 libvirtd 群組
<<toBalaNotes "1">>
{{item1{檢視 Virtual Machine Manager 設定}}}

''1. 點選 [編輯][Connection Details]''

[img[img/kvm/KVMHost01.png]]

''2. 檢視 KVM 虛擬主機的基本資訊''

[img[img/kvm/KVMHost02.png]]

''3. 點選 [虛擬網路] 標籤''

[img[img/kvm/KVMHost03.png]]

''4. 點選 [Storage] 標籤''

[img[img/kvm/KVMHost04.png]]

''[註]'' 點選 [New Volume] 按鈕, 可產生硬碟映像檔 (qcow2, vmdk, dmg,...)

''5. 點選 [Network Interface] 標籤''

[img[img/kvm/KVMHost05.png]]

''[註]'' 在 Ubuntu 10.04.2 沒有提供這功能, Fedora 12 可使用這功能, 快速建立 Bridge 介面, 請參考以下網址 :
http://www.linux-kvm.com/content/bridged-networking-virt-manager-083

<<toBalaNotes "2">>
{{item1{VMM 自動安裝}}}

1. 執行 VMFactory/VirtualManager 目錄中的 insvmm.sh 程式

{{item1{由原始程式編譯安裝最新版本}}}
以下操作必須在 Ubuntu 桌面系統執行

''1. 安裝編譯所需的相依套件''
{{{
$ sudo apt-get install build-essential python-libvirt python-gtk-vnc python-gnome2-desktop-dev python-urlgrabber intltool libxml2-dev libvirt-dev libgtk2.0-dev libglade2-dev libgtk-vnc-1.0-dev
正在讀取套件清單... 完成
正在重建相依關係
正在讀取狀態資料... 完成
下列的額外套件將被安裝:
  autoconf automake autotools-dev cvs debhelper dpkg-dev fakeroot g++ g++-4.4
  gettext html2text intltool-debian libatk1.0-dev libcairo2-dev
  libdirectfb-dev libdirectfb-extra libexpat1-dev libfontconfig1-dev
  libfreetype6-dev libgcrypt11-dev libglib2.0-dev libgnomecups1.0-1
  libgnomeprint2.2-0 libgnomeprint2.2-data libgnomeprintui2.2-0
  libgnomeprintui2.2-common libgnutls-dev libgpg-error-dev libice-dev
  libjpeg62-dev libmail-sendmail-perl libpango1.0-dev libpixman-1-dev
  libpng12-dev libpthread-stubs0 libpthread-stubs0-dev libsm-dev
  libstdc++6-4.4-dev libsys-hostname-long-perl libsysfs-dev libtasn1-3-dev
  libx11-dev libxau-dev libxcb-render-util0-dev libxcb-render0-dev libxcb1-dev
  libxcomposite-dev libxcursor-dev libxdamage-dev libxdmcp-dev libxen3-dev
  libxext-dev libxfixes-dev libxft-dev libxi-dev libxinerama-dev libxrandr-dev
  libxrender-dev m4 patch po-debconf python-bugbuddy python-evince
  python-evolution python-gnomedesktop python-gnomeprint python-gtop
  python-mediaprofiles python-metacity python-rsvg python-totem-plparser
  x11proto-composite-dev x11proto-core-dev x11proto-damage-dev
  x11proto-fixes-dev x11proto-input-dev x11proto-kb-dev x11proto-randr-dev
  x11proto-render-dev x11proto-xext-dev x11proto-xinerama-dev xtrans-dev
  xz-utils zlib1g-dev
建議套件:
  autoconf2.13 autoconf-archive gnu-standards autoconf-doc libtool dh-make
  debian-keyring debian-maintainers g++-multilib g++-4.4-multilib gcc-4.4-doc
  libstdc++6-4.4-dbg gettext-doc libcairo2-doc libgcrypt11-doc glade
  glade-gnome libglib2.0-doc python-subunit gnutls-doc gnutls-bin guile-gnutls
  libgtk2.0-doc libpango1.0-doc libstdc++6-4.4-doc diffutils-doc
  libmail-box-perl bug-buddy python-gnome2-desktop-doc
  python-gnome2-desktop-dbg
下列【新】套件將會被安裝:
  autoconf automake autotools-dev build-essential cvs debhelper dpkg-dev
  fakeroot g++ g++-4.4 gettext html2text intltool intltool-debian
  libatk1.0-dev libcairo2-dev libdirectfb-dev libdirectfb-extra libexpat1-dev
  libfontconfig1-dev libfreetype6-dev libgcrypt11-dev libglade2-dev
  libglib2.0-dev libgnomecups1.0-1 libgnomeprint2.2-0 libgnomeprint2.2-data
  libgnomeprintui2.2-0 libgnomeprintui2.2-common libgnutls-dev
  libgpg-error-dev libgtk-vnc-1.0-dev libgtk2.0-dev libice-dev libjpeg62-dev
  libmail-sendmail-perl libpango1.0-dev libpixman-1-dev libpng12-dev
  libpthread-stubs0 libpthread-stubs0-dev libsm-dev libstdc++6-4.4-dev
  libsys-hostname-long-perl libsysfs-dev libtasn1-3-dev libvirt-dev libx11-dev
  libxau-dev libxcb-render-util0-dev libxcb-render0-dev libxcb1-dev
  libxcomposite-dev libxcursor-dev libxdamage-dev libxdmcp-dev libxen3-dev
  libxext-dev libxfixes-dev libxft-dev libxi-dev libxinerama-dev libxml2-dev
  libxrandr-dev libxrender-dev m4 patch po-debconf python-bugbuddy
  python-evince python-evolution python-gnome2-desktop python-gnomedesktop
  python-gnomeprint python-gtk-vnc python-gtop python-libvirt
  python-mediaprofiles python-metacity python-rsvg python-totem-plparser
  python-urlgrabber x11proto-composite-dev x11proto-core-dev
  x11proto-damage-dev x11proto-fixes-dev x11proto-input-dev x11proto-kb-dev
  x11proto-randr-dev x11proto-render-dev x11proto-xext-dev
  x11proto-xinerama-dev xtrans-dev xz-utils zlib1g-dev
升級 0 個,新安裝 95 個,移除 0 個,有 0 個未被升級。
需要下載 31.5MB 的套件檔。
此操作完成之後,會多佔用 107MB 的磁碟空間。
是否繼續進行 [Y/n]?y

}}}

''2.取得 virt-manager, virt-install 及 virt-viewer 原始碼''
{{{
$ cd ~
$ mkdir virt
$ cd virt
$ wget http://virt-manager.et.redhat.com/download/sources/virt-manager/virt-manager-0.8.7.tar.gz
$ wget http://virt-manager.et.redhat.com/download/sources/virtinst/virtinst-0.500.6.tar.gz
$ wget http://virt-manager.et.redhat.com/download/sources/virt-viewer/virt-viewer-0.2.0.tar.gz
}}}

''3. 編譯與安裝 virt-manager''
''Virt-manager is a desktop tool (written in Python) for managing virtual machines.'' It provides the ability to control the life cycle of existing machines (bootup/shutdown, pause/resume, suspend/restore), provision new virtual machines, manage virtual networks, access the graphical console of virtual machines and view performance statistics.
{{{
$ cd ~/virt
$ tar xvzf virt-manager-0.8.7.tar.gz
$ cd virt-manager-0.8.7/
$ ./configure
$ make
$ sudo make install
}}}

''[注意]'' Maybe the ''./configure'' will return with errors. It's because there are some unsatisfied dependencies. You will probably need to do a "aptitude search missingPack" and install the missing package with ''sudo apt-get install missingPack-dev''.

''4. 編譯與安裝 virt-install''
The ''Virt Install'' tool (virt-install for short command name, virtinst for package name) is a command line tool which provides an easy way to provision operating systems into virtual machines. It also provides an API to the virt-manager application for its graphical VM creation wizard. 
{{{
$ cd ~/virt
$ tar -xvzf virtinst-0.500.6.tar.gz
$ cd virtinst-0.500.6/
$ sudo python setup.py install
}}}

''[註]'' Unlike virt-manager, virt-install is a command line tool that allows you to create KVM guests on a headless server. You may ask yourself: "But I can use vmbuilder to do this, why do I need virt-install?" The difference between ''virt-install'' and ''vmbuilder'' is that vmbuilder is for creating Ubuntu-based guests, whereas virt-install lets you install all kinds of operating systems (e.g. Linux, Windows, Solaris, FreeBSD, OpenBSD) and distributions in a guest, just like virt-manager.

''5. 編譯與安裝 virt-viewer''
The ''Virtual Machine Viewer'' application (virt-viewer for short package name) is a lightweight interface for interacting with the graphical display of virtualized guest OS. It uses ''GTK-VNC'' as its display capability, and libvirt to lookup the VNC server details associated with the guest. It is intended as a replacement for the traditional vncviewer client, since the latter does not support SSL/TLS encryption of x509 certificate authentication. 
{{{
$ cd ~/virt
$ tar -xvzf virt-viewer-0.2.0.tar.gz
$ cd virt-viewer-0.2.0/
$ ./configure
$ make
$ sudo make install
}}}

''6. 重新開機''
一定要重新開機, 否則在 "應用程式" 選單中, 看不到 "系統工具 -> 虛擬機器管理員"
{{{
$ sudo reboot
}}}

///%1
//%/

///%2
//%/

///%3
//%/
''參考文章''
1. FAQ-KVM
http://www.linux-kvm.org/page/FAQ#Is_dynamic_memory_management_for_guests_supported.3F
2. kvm memory ballooning is unusable in Lucid (必讀)
http://web.archiveorange.com/archive/v/LLXmLutCmRbFOGXS7jJy
3. Cloud Computing is Memory Bound
http://www.intalio.com/cloud-computing-is-memory-bound

{{item1{Is dynamic memory management for guests supported ?}}}
''A.'' KVM only allocates memory as the guest tries to use it. Once it's allocated, KVM keeps it. Some guests (''namely Microsoft guests'') zero all memory at boot time. So they will use all memory.'' (一次佔滿)''

''B.'' Certain guests (''only Linux at the moment'') have a balloon driver, so the host can have the guest allocate a certain amount of memory which the guest won't be able to use anymore and it can then be freed on the host. ''Ballooning'' is controlled in the host via the balloon monitor command. ''(用多少佔多少)''

''[註] Ubuntu 10.04 內建 balloon driver'' 
{{{
$ ls /lib/modules/2.6.32-33-generic-pae/kernel/drivers/virtio/
virtio_balloon.ko
}}}

''C.'' Some hosts have a feature called ''KSM (Kernel Sharedpage Merging)'', which collapses together identical pages; this requires kernel support on the host, as well as a kvm new enough to opt in (參加) to the behavior. As some guest platforms (most notably Windows) zero out free'd memory, such pages are trivially collapsed. ''(共用相同 Memory pages)''

{{item1{Virtio balloon}}}
本文網址 : http://rwmj.wordpress.com/2010/07/17/virtio-balloon/  (必讀)

After someone asked me a question about ''balloons'' (in the virtualization sense) today, I noticed that there is not very much documentation around. This post explains what the KVM virtio_balloon driver is all about.

First of all, what is a balloon driver if you’ve never even heard of the concept? It’s a way to give or take RAM from a guest. (In theory at least), if your guest needs more RAM, you can use the balloon driver to give it more RAM. Or if the host needs to take RAM away from guests, it can do so. All of this is done without needing to pause or reboot the guest. ''(不停機狀態下改變客座主機 (Guest OS) 的記憶體大小)''

You might think that this would work as a RAM “hot add” feature, rather like hot adding disks to a guest. Although RAM hot add would be much better, currently this is not how ballooning works.

What we have is a kernel driver inside the guest called virtio_balloon. This driver acts like a kind of weird process, either expanding its own memory usage or shrinking down to nearly nothing, as in the diagrams below:

[img[img/kvm/balloon-expanded.png]]

[img[img/kvm/balloon-shrink.png]]

When the balloon driver expands, normal applications running in the guest suddenly have a lot less memory and the guest does the usual things it does when there’s not much memory, including swapping stuff out and starting up the OOM killer (Out of memory Killer). (The balloon itself is non-swappable and un-killable in case you were wondering).

So what’s the point of a kernel driver which wastes memory? There are two points: Firstly, the driver communicates with the host (over the virtio channel), and the host gives it instructions (“expand to this size”, “shrink down now”). The guest cooperates, but doesn’t directly control the balloon.

Secondly, memory pages in the balloon are unmapped from the guest and handed back to the host, so the host can hand them out to other guests. It’s like the guest’s memory has a chunk missing from it:

[img[img/kvm/balloon-chunk.png]]

Libvirt has two settings you can control called currentMemory and maxMemory ([[“memory” in the libvirt XML|http://libvirt.org/formatdomain.html#elementsResources]]):

[img[img/kvm/balloon-labels.png]]

''maxMemory (or just <memory>)'' is the memory allocated at boot time to a guest. KVM and Xen guests currently cannot exceed this. currentMemory controls what memory you’re requesting to give to the guest’s applications. The balloon fills the rest of the memory and gives it back to the host for the host to use elsewhere.

{{item1{重設虛擬主機 MaxMemory}}}
You can adjust this manually for your guests, either by ''editing the XML'', or by using the ''virsh setmem'' command.

''方法一''
1. 直接使用 virsh 的 setmaxmem 命令, 得到以下錯誤訊息 :
{{{
$ virsh setmaxmem Core455 700000
錯誤: Unable to change MaxMemorySize
錯誤: Requested operation is not valid: cannot resize the maximum memory on an active domain

$ virsh destroy Core455
區域 Core455 已經刪除

$ virsh setmaxmem Core455 700000
}}}

在 Ubuntu 10.10  的 virsh 命令中, setmaxmem 命令是無法執行, 所以只能透過 xml 設定檔來重設虛擬主機的最大記憶體
{{{
$ virsh setmaxmem DN2 700000
錯誤 : Unable to change MaxMemorySize
錯誤 : this function is not supported by the connection driver: virDomainSetMaxMemory
}}}

''方法二''
1. 修改 虛擬主機設定檔中的 <memory> 標籤內容
{{{
$ nano /etc/libvirt/qemu/DN2.xml 
<domain type='kvm'>
  <name>DN2</name>
  <uuid>d9b38353-78f2-bea1-02f5-d9b4b72dff71</uuid>
  <memory>700000</memory>
  <currentMemory>524288</currentMemory>
  <vcpu>1</vcpu>
                       :
}}}

2. 重新定義虛擬主機設定檔
{{{
# virsh define /etc/libvirt/qemu/DN2.xml 
區域 DN2 定義自 /etc/libvirt/qemu/DN2.xml
}}}

3. 檢視設定結果
虛擬主機必須關機, 才能看到重設的值 (700000)
{{{
$ virsh dominfo DN2
Id:             -
名稱:       DN2
UUID:         d9b38353-78f2-bea1-02f5-d9b4b72dff71
作業系統類型: hvm
狀態:       關機
處理器數目: 1
最大記憶體: 700000 kB
已使用的記憶體: 524288 kB
Persistent:     yes
Autostart:      disable
Security model: apparmor
Security DOI:   0
}}}

{{item1{動態設定虛擬主機的記憶體大小}}}

''1. 檢視目前已使用的記憶體''
{{{
# virsh dominfo DN2
Id:             -
名稱:       DN2
UUID:         d9b38353-78f2-bea1-02f5-d9b4b72dff71
作業系統類型: hvm
狀態:       關機
處理器數目: 1
最大記憶體: 700000 kB
已使用的記憶體: 524288 kB
Persistent:     yes
Autostart:      disable
Security model: apparmor
Security DOI:   0
}}}

''2. 重設為 400000 (虛擬主機執行中)''
{{{
$ virsh setmem DN2 400000                   (虛擬主機必須啟動)
錯誤: Requested operation is not valid: domain is not running

$ virsh start DN2

$ virsh setmem DN2 400000
}}}

''3. 檢視虛擬主機記憶體大小''
{{{
$ virsh console DN2
Connected to domain DN2
Escape character is ^]

$ free -m
             total       used       free     shared    buffers     cached
Mem:           372         57        315          0          5         23
-/+ buffers/cache:         28        344
Swap:          565          0        565
}}}

Memory pages in the balloon are unmapped from the guest and handed back to the host, so the host can hand them out to other guests. It’s like the guest’s memory has a chunk missing from it:

@@color:blue;''Ubuntu 12.04'' 的 KVM, 內定已啟動 Balloon 功能, 而 ''Ubuntu 10.04.2'' 內定沒有啟動 Balloon 功能, 請執行以下步驟 :@@ 
{{{
$ cd /usr/bin

$ sudo mv kvm kvm.real

$ sudo nano kvm
#!/bin/bash
exec /usr/bin/kvm.real -balloon virtio "$@"

$ sudo chmod +x kvm

}}}
<<toBalaNotes "1">>

{{item1{JVM Virtual Memory}}}
{{{
This is where memory virtualization comes into play, not so much for addressing the need for elastic memory, but for reducing the amount of memory waste. The idea is pretty simple: instead of allocating a fixed amount of memory for each application as most hypervisors and Java Virtual Machines do today, all available memory can be shared across all running applications and allocated on demand. Unfortunately, such dynamic memory allocation is not possible with hypervisors today, which suggests that other means of virtualization should be used for the purpose of multi-tenancy when memory waste becomes too significant for it to be simply ignored (Cf. On Multi-Tenancy). One of them is JVM Virtualization. And as it turns out, there are many reasons why this approach should be preferred over any other.

The idea behind JVM Virtualization is to provide a virtual JVM for each application (or tenant). This technology is currently offered by Azul Systems (Learn More) and comes with a fantastic side benefit: it allows multiple Java applications to share the same memory space, in a secure manner, and without having to define a static heap size (Learn More) for each application. As a result, two birds get killed with one stone: multi-tenancy (with secure virtual JVMs) and memory waste reduction (with memory sharing).

This is particularly significant when considering the memory usage profile of most Web-based Java applications: at start time, they tend to use a very small amount of memory. But as more concurrent users start logging in, more objects must be allocated, and more memory gets used. Unfortunately, usage patterns tend to be hard to predict, therefore systems administrators have the tendency to over-allocate memory for these applications, thereby creating massive wastes of memory. It is estimated that on a time-adjusted basis, anywhere between 50% to 90% of all available memory is wasted in such a fashion. Coming back to our previous discussion, this translates into wasting 50% to 90% of all data-center resources...
}}}



///%1
//%/
For this "List" task we don't use the default action "addToList" (that simply adds all selected items to the list) but create the list using the "write" action and refer to the build-in variable "index" that is incremented for every tiddler being processed.
{{{
<<forEachTiddler
    where
        'tiddler.tags.contains("Notes")'
    write
        '(index < 10) ? "* [["+tiddler.title+"]]\n" : ""'
>>
}}}
In the write parameter there is a conditional output: when we are processing the tiddlers 0 to 9 it will write a line with:

 {{{* [[theTiddlerName]]}}}

Tiddler 10 and the following ones will generate no output (as the empty string is specified).

''//Result://''
<<forEachTiddler
    where
        'tiddler.tags.contains("Notes")'
    write
        '(index < 10) ? "* [["+tiddler.title+"]]\n" : ""'
>>
{{item1{檢測所有網段}}}
請至 oc9 目錄中, 執行以下命令 

''1. 編輯 ./conf/netid.conf 這個設定檔''
{{{
$ nano conf/netid.conf
192.168.100.0/24
192.168.66.0/25
192.168.88.0/24
}}}

''2. 開始檢測各網段''
使用 netcheck.sh 檢測各網段的所有虛擬電腦, 此程式內定會去讀 ''./conf/netid.conf'' 這個設定檔
{{{
$ sudo ./netcheck.sh 

<html>
<head>
<link rel=stylesheet type='text/css' href='netid.css'>
</head>
<body>

<div id='192.168.100.0'>192.168.100.0/24</div>
<ol>
<li>192.168.100.1 跳過 (本機位址)
<li>192.168.100.66 ssh 
<li>192.168.100.88 ssh 
<li>192.168.100.254 ssh 
</ol>

<div id='192.168.66.0'>192.168.66.0/25</div>
<ol>
<li>192.168.66.1 跳過 (本機位址)
<li>192.168.66.11 
<li>192.168.66.126 ssh 
</ol>

<div id='192.168.88.0'>192.168.88.0/24</div>
<ol>
<li>192.168.88.1 跳過 (本機位址)
<li>192.168.88.11 
<li>192.168.88.254 ssh 
</ol>

</body>
</html>

}}}

執行 checknid.sh 這程式, 可以直接指定 Network ID (140.137.214.0/24), 如下例 :
{{{
# sudo ./checknid.sh 140.137.214.0/24
}}}

{{item1{批次安裝 iClass 系統}}}
iClass 系統, 是由 Apache 及 Samba 協同建立的應用系統. 執行此一實作, 必須先安裝 Lab202. 在 Lab202 中, 已安裝一部 Ubuntu Server (AS996), 在 Linux 裸機系統中, 可使用 cpiclass.sh 程式, 將 iClass 系統, 複製到 AS996 虛擬電腦中. 以下為建置步驟 :

''1. 開始複製''
{{{
$ cd iLab/oc9/

$ sudo ./cpiclass.sh 

192.168.100.0/24 
--------------------------
192.168.100.1 跳過 (本機位址)
192.168.100.99 沒有安裝 OpenSSH
192.168.100.128 沒有安裝 OpenSSH
192.168.100.129 沒有安裝 OpenSSH
192.168.100.254 無法複製, 因不是 Ubuntu 系統

192.168.66.0/25 
--------------------------
192.168.66.1 跳過 (本機位址)

192.168.88.0/24 
--------------------------
192.168.88.1 跳過 (本機位址)

192.168.99.0/24 
--------------------------
192.168.99.1 跳過 (本機位址)
192.168.99.5 無法複製, 因不是 Ubuntu 系統
192.168.99.6 複製成功
192.168.99.254 沒有安裝 OpenSSH
}}}

''2. 登入 iClass 主機''
{{{
root@US1043:~$ sudo virsh console AS996
Connected to domain AS996
Escape character is ^]

root@AS996:~# 
}}}

''3. 安裝 iClass 系統''
{{{
root@AS996:~# cd iclass/
root@AS996:~# sudo ./iclass.sh 
確定要安裝 Apache2 伺服器 (y/n) : y
開始更新套件清單 ..... 請稍待 -> 更新成功 
Apache2 伺服器安裝成功
家目錄模組 (userdir) 安裝成功
設定檔 (/etc/apache2/httpd.conf) 複製成功
Apache2 伺服器重新啟動成功

確定要安裝 Samba 伺服器 (y/n) : y
Samba 伺服器安裝成功
設定檔 (/etc/samba/smb.conf) 複製成功
帳號同步模組安裝成功
Samba 伺服器重新啟動成功

請按任何鍵繼續.. 
}}}

<<toBalaNotes "1">>
{{item1{手動安裝 iClass 主機}}}

''1. 安裝 Apache Server''
{{{
$ sudo apt-get install apache2
}}}

''2. 啟動 mod_userdir 動態模組''
{{{
$ sudo a2enmod userdir
Enabling module userdir.
Run '/etc/init.d/apache2 restart' to activate new configuration!
}}}

''3. 修改 /etc/apache2/httpd.conf 設定檔內容''
{{{
$ sudo nano /etc/apache2/httpd.conf
}}}

修改內容如下 : 
{{{
# Settings for user home directories
#
# Required module: mod_userdir   (a2enmod 命令啟動)
#
# UserDir: The name of the directory that is appended onto a user's home
# directory if a ~user request is received.  Note that you must also set
# the default access control for these directories, as in the example below.
#
UserDir www   

<Directory /home/*/www>
    AllowOverride FileInfo AuthConfig Limit Indexes
    Options MultiViews Indexes SymLinksIfOwnerMatch IncludesNoExec
    <Limit GET POST OPTIONS>
        Order allow,deny
        Allow from all
    </Limit>
    <LimitExcept GET POST OPTIONS>
        Order deny,allow
        Deny from all
    </LimitExcept>
</Directory>
}}}

''4. 建立家目錄的虛擬目錄''
{{{
$ cd /var/www
$ sudo ln -s /home/student/www student
}}}

''5. 安裝 Samba Server''
{{{
$ sudo apt-get install samba
}}}

如需手動同步 Linux 與 Samba 帳號資料庫, 可執行以下步驟 : 
{{{
$ sudo apt-get install libpam-smbpass
$ sudo smbpasswd -a student
}}}

''6. 設定 Samba Server''
{{{
# rm /etc/samba/smb.conf
# nano /etc/samba/smb.conf
[global]
workgroup = OC9
wins support = yes
netbios name = OC9FS
os level = 255

domain master = no
preferred master = yes
local master = yes

browse list = yes
dns proxy = no

# 採用 Samba 自行認證模式
security = user
guest only = no
guest ok = no

name resolve order = wins lmhosts bcast

log file = /var/log/samba/log.%m
max log size = 1000
syslog = 2


# 中文顯示設定
unix charset  = utf8
display charset = utf8
dos charset  = cp950

[homes]
  comment = Home Directories

  # browseable = no 這設定, 使得在網芳系統不會看到 homes 這名稱
  browseable = no

  # By default, the home directories are exported read-only. Change next
  # parameter to ‘yes’ if you want to be able to write to them.
  writable = yes

  # By default, \\server\username shares can be connected to by anyone
  # with access to the samba server. Un-comment the following parameter
  # to make sure that only “username” can connect to \\server\username
  valid users = %S

  # 符合指定格式的檔案(.*), 會自動被附於 '隱藏' 屬性
  hide files = /.*/

  create mode = 0664
  directory mode = 0775
}}}
<<toBalaNotes "2">>


///%1
//%/

///%2
//%/
本文網址 : http://azouhr.wordpress.com/2012/06/03/libvirt-chardev-opening-backend-pty-failed-permission-denied/

Recently I found myself in front of a strange problem that prevented me from creating new virtual machines with libvirt on KVM. Everytime I tried to create a virtual machine, I got a message similar to this:

Error: internal error Process exited while reading console log output: chardev: opening backend "pty" failed: Permission denied

Interestingly, directly after a reboot of the host, the same guest configuration would simply work. I did some searches on the internet and found that only a few other people had this same problem, but I could not find a solution.

After tracing libvirtd and pestering some of my colleagues, I found that it actually could not access /dev/pts correctly. It turned out, that some change root environment also mounted /dev/pts although not with the right mount parameters. This had the effect, that the original /dev/pts also was remounted with the wrong mounting parameters.

So, to solve this issue, you need to
{{{
    find who is mounting /dev/pts in a wrong way and correct it
    remount /dev/pts correctly
}}}
The remount can be done with the following command:
{{{
mount -n -t devpts -o remount,mode=0620,gid=5 devpts /dev/pts
}}}
After this, libvirtd will be able again to access the device and work as desired.

<<forEachTiddler 
 where
   'tiddler.tags.contains("toBalaMacro")'
>>
''參考文章''
1.How To Install Node.js on an Ubuntu 14.04 server
https://www.digitalocean.com/community/tutorials/how-to-install-node-js-on-an-ubuntu-14-04-server

{{{
student@UB14DOS2:~/dos$ sudo add-apt-repository ppa:chris-lea/node.js
 Evented I/O for V8 javascript. Node's goal is to provide an easy way to build scalable network programs
 更多資訊:https://launchpad.net/~chris-lea/+archive/node.js
請輸入 [ENTER] 繼續,或 Ctrl-C 來取消加入動作

gpg: `/tmp/tmpn8rw9iix/secring.gpg' 鑰匙圈已建立
gpg: `/tmp/tmpn8rw9iix/pubring.gpg' 鑰匙圈已建立
gpg: 正在請求金鑰 C7917B12 自 hkp 伺服器 keyserver.ubuntu.com
gpg: /tmp/tmpn8rw9iix/trustdb.gpg: 建立了信任資料庫
gpg: 金鑰 C7917B12: 公鑰 "Launchpad chrislea" 已匯入
gpg: 處理總量: 1
gpg:               已匯入: 1  (RSA: 1)
OK
student@UB14DOS2:~/dos$ 
}}}

{{{
student@UB14DOS2:~/dos$ sudo apt-get update

student@UB14DOS2:~/dos$ sudo apt-get install nodejs
正在讀取套件清單... 完成
正在重建相依關係          
正在讀取狀態資料... 完成
下列的額外套件將被安裝:
  rlwrap
下列【新】套件將會被安裝:
  nodejs rlwrap
升級 0 個,新安裝 2 個,移除 0 個,有 0 個未被升級。
需要下載 4,333 kB 的套件檔。
此操作完成之後,會多佔用 17.5 MB 的磁碟空間。
Do you want to continue? [Y/n] 

}}}


''參考文章''
1. 認識邊緣網路架構 VEB、VN-link、VEPA技術介紹 (一定要看)
http://www.netadmin.com.tw/article_content.aspx?sn=1112070005
2. Bridging (networking)
http://en.wikipedia.org/wiki/Bridging_%28networking%29
3. What is a Network Bridge?
http://www.wisegeek.org/what-is-a-network-bridge.htm
4. Spanning Tree Protocol
http://en.wikipedia.org/wiki/Spanning_Tree_Protocol
5. Understanding Spanning Tree
http://www.linuxplanet.com/linuxplanet/tutorials/6520/1

{{item1{橋接器 (Bridge)}}}
A network bridge, also known as a layer 2 switch, is a hardware device used to create a connection between two separate computer networks or to divide one network into two. Both networks usually use the same protocol; Ethernet is an example of a protocol. Network devices include, but are not limited to, Personal Computers (PCs), printers, routers, switches and hubs. Devices connected to a network via an Ethernet adapter card have what is known as a Media Access Control (MAC) address, also called a physical or hardware address. It is this address that uniquely identifies a device to a bridge that can then determine to which network the device is connected.

The principal function of a network bridge is to forward data based on the MAC address of the sending and receiving devices. This operation helps to eliminate what are known as collision domains. One way of defining a collision domain is a network in which one device, also called a node, forces every other device to listen when it is transmitting data packets. Another definition states that a collision domain exists when two or more devices attempt to transmit information at the exact same time. Networks running Carrier Sense Multiple Access/Collision Detection (CSMA/CD) should, in theory, be protected from collisions occurring, but CSMA/CD can fail.

{{item1{Spanning Tree Protocol}}}
{{op1{Spanning Tree Protocol 簡介}}}

Spanning Tree Protocol (STP)是執行在橋接器(Bridge)或是交換器(Switch)上 的一種Layer 2協定,STP的規格是定義在IEEE 802.1D標準規範中。STP的主 要目的是在使用Bridge 或Switch 連接成網路時,來避免因為使用冗餘路徑 (Redundant Path)機制而造成有迴圈(Loop)的情況發生。STP可以偵測及免除網路 迴圈,並且在Bridge或Switch之間提供備用的連線,STP 允許Bridge或Switch 與其他STP 相容的裝置互動,來確保在網路上的兩個節點之間只有一條路徑存 在。 若是一個冗餘拓撲(Redundant Topology)沒有使用STP的話,將會發生下列 問題。

1. 廣播風暴(Broadcast Storm) 若是沒有Spanning Tree迴圈避免機制的話,每個Bridge或Switch將會永無 止境地Flood廣播封包至所有的Port上,這種現像稱為廣播風暴。

2. 過濾資料庫不穩定(Filtering Database Instability) 當一個Frame 的數個複本抵達一台Bridge 或Switch 不同的Port 上時,在 Filtering Database中的MAC映對資料將會不穩定。

{{op1{Spanning Tree Protocol的運作方式}}}

Spanning Tree提供了一個無迴圈的網路,當一台支援STP的Switch在網路 拓撲中發現有迴圈的狀況,它會擋住(Block)一個或多個冗餘的Port,Spanning Tree持續不斷地探索此網路,當發現網路拓撲有改變時,STP會自動利用擋住某 些Port的特性能避免錯誤的情況發生。

執行Spanning Tree演算法的Bridge或Switch會定期交換組態訊息,此組態 訊息是一種稱為BPDU (Bridge Protocol Data Unit)或Hello Message的Multicast Frame。根據BPDU的內容,這些STP裝置將可以以樹狀結構的方式來建構一個 無迴圈的網路

{{item1{將 Linux 系統轉換成 Bridge 設備}}}
老實說是用過虛擬化 KVM 之後才發現有 Linux 下網路的模式有一個東西叫做 Bridge mode,有點像將多個 NIC Adapter ports 透過一個虛擬出來的 Bridge(Switch) 全部串在一起,再透過一個虛擬出來的網路裝置出去,這簡直就像是把 Linux 當成一個 switch 來使用了.除了虛擬化 KVM 的環境預設已經使用一個 Bridge 外,在一般的 Linux 環境下的方法也很簡單:我們也可以透過下面幾個簡單的指令來產生自己所需要的 Bridge 裝置出來.下面的圖示你可以很清楚的看出來虛擬 NIC (bri0) 和實體 NIC (eth0,eth1,eth2) 是如何透過 bridge 模式串接在一起.

[img[img/bridge_mode.png]]

''1. 目標是用 eth0, eth1, eth2 來產生一個 bridge(bri0)''
{{{
# ifconfig eth0 0.0.0.0
# ifconfig eth1 0.0.0.0
# ifconfig eth2 0.0.0.0
# brctl addbr bri0
# brctl addif bri0 eth0 eth1 eth2
}}}

''2. 看這一些 eth 是在哪一個 bridge 上.''
{{{
# brctl show
bridge name     bridge id               STP enabled     interfaces
bri0            8000.001517785dd6       no                      eth2
                                                                eth1
                                                                eth0
virbr0          8000.000000000000       yes
}}}
virbr0 是 RedHat 5 預設的 bridge 而 bri0 是剛剛透過 brctl 工具所建立出來的. 

''3. 設定一組 IP 方便遠端管理使用.''
{{{
# ifconfig bri0 192.8.1.3
# ifconfig bri0
bri0      Link encap:Ethernet  HWaddr 00:15:17:78:5D:D6 
          inet addr:192.8.1.3  Bcast:192.8.1.255  Mask:255.255.255.0
          inet6 addr: fe80::215:17ff:fe78:5dd6/64 Scope:Link
          UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
          RX packets:14767 errors:0 dropped:0 overruns:0 frame:0
          TX packets:34 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:0
          RX bytes:679328 (663.4 KiB)  TX bytes:9626 (9.4 KiB)
}}}

''4. 最後 Allow IP forwarding''
要讓 IP Forwarding 生效最快的方式就是直接更改 /proc/sys/net/ipv4/ip_forward 的值,但這方式在下次開機後會失效.
{{{
# sudo bash -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
}}}

還是要更改設定檔,這樣下次開機一樣有效,並使用指令 sysctl -p 讓他立即生效.
{{{
# vi /etc/sysctl.conf
net.ipv4.ip_forward = 1
# sysctl -p
}}}
 之後所有插在 eth0, eth1, eth2 的 Hosts 都可以互通了!!可以將剛剛的步驟寫在 /etc/rc.local,讓他在開機時候就會生效.

一開始你可能會對於 Bridge 和 Bonding 這兩東西有所困惑,感覺功能都是將多個 NIC ports 綁在一起來使用,但是最後功能上又不是很像.其實Bridge mode 的功能是將 linux 模擬成一個 Bridge(switch)來使用.凡是 Bridge 上的 NIC ports 實體網路埠都會透過一個虛擬的網路裝置出去,這就相當於是一個交換機(switch) 一樣,每個實體網路埠都有專屬的 MAC.不像是 Bonding 外面看來所有的 NIC 的 MAC 都是一樣.

還有一點要注意的 bridge 是不能實現 bonding 的負載平衡(load-balancing), 容錯(fault-tolerance) 的功能.如果你把 brctl (Bridge) 的 NIC ports 實體網路埠都接到同一交換機(Switch),那麼就會出現 Loop 環路(預設都是沒有將 STP 開啟).接下來大概你的 switch 因為廣播風暴就掛了.

{{item1{安裝 Bridge 套件}}}
{{{
$ sudo apt-get install bridge-utils
}}}

''[註]'' 在 10.04 版, 如已安裝 KVM 套件, 上述命令則不需要執行

{{item1{建立 Bridge 網路介面}}}

''1. 檢視修改前網路設定''
{{{
$ ifconfig -a
eth0    Link encap:Ethernet  HWaddr 00:09:73:d7:ae:5b  
          inet addr:140.137.214.62  Bcast:140.137.214.255  Mask:255.255.255.0
          inet6 addr: fe80::209:73ff:fed7:ae5b/64 Scope:Link
          UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
          RX packets:133119 errors:0 dropped:0 overruns:0 frame:0
          TX packets:61110 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:1000 
          RX bytes:191146613 (191.1 MB)  TX bytes:5693333 (5.6 MB)
          Interrupt:16 Base address:0xe800 

lo        Link encap:Local Loopback  
          inet addr:127.0.0.1  Mask:255.0.0.0
          inet6 addr: ::1/128 Scope:Host
          UP LOOPBACK RUNNING  MTU:16436  Metric:1
          RX packets:61996 errors:0 dropped:0 overruns:0 frame:0
          TX packets:61996 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:0 
          RX bytes:87349030 (87.3 MB)  TX bytes:87349030 (87.3 MB)

virbr0  Link encap:Ethernet  HWaddr 00:00:00:00:00:00  
          inet addr:192.168.122.1  Bcast:192.168.122.255  Mask:255.255.255.0
          inet6 addr: fe80::b40d:e8ff:fe29:efdf/64 Scope:Link
          UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
          RX packets:59908 errors:0 dropped:0 overruns:0 frame:0
          TX packets:127091 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:0 
          RX bytes:3495455 (3.4 MB)  TX bytes:190580365 (190.5 MB)
}}}

@@color:red;如果有看到 eth2 網卡, 請務必執行以下命令 (只適用 Ubuntu):@@
{{{
$ sudo rm /etc/udev/rules.d/70-persistent-net.rules
}}}

或

將以下程式段, 加到 ~/.bashrc 這檔案的最後
{{{
ifconfig -a | grep eth0 &>/dev/null
if [ "$?" != "0" ]; then
   read -p "reset MAC ? (y/n) : " ans
   if [ "$ans" == "y" ]; then
      sudo rm /etc/udev/rules.d/70-persistent-net.rules &>/dev/null
      sudo reboot
   fi
   echo ""
fi
}}}

''[註]'' virbr0 是 KVM 系統自動產生, 主要是執行 NAT 功能, 

''2. 修改網路設定檔''
{{{
$ sudo nano /etc/network/interfaces
auto lo
iface lo inet loopback

auto eth0
iface eth0 inet manual

auto br0
iface br0 inet static
        address 192.168.0.10
        netmask 255.255.255.0
        gateway 192.168.0.1
        bridge_ports eth0
        bridge_stp off
        dns-nameservers 168.95.1.1
}}}

@@color:red;''[註]'' dns-nameservers 一定要設定, 沒有設定會造成 "sudo apt-get update" 執行失敗@@

''bridge_stp state''
{{{
SPANNING TREE PROTOCOL

       Multiple ethernet bridges can  work  together  to  create  even  larger
       networks  of  ethernets  using  the IEEE 802.1d spanning tree protocol.
       This protocol is  used  for  finding  the  shortest  path  between  two
       ethernets,  and  for  eliminating  loops  from  the  topology.  As this
       protocol is a standard, linux  bridges  will  interwork  properly  with
       other  third  party bridge products. Bridges communicate with eachother
       by sending and receiving BPDUs  (Bridge  Protocol  Data  Units).  These
       BPDUs   can  be  recognised  by  an  ethernet  destination  address  of
       01:80:c2:00:00:00.

       The spanning tree protocol can also be turned off (for those situations
       where  it  just  doesn’t make sense, for example when this linux box is
       the only bridge on the LAN, or when you know that there are no loops in
       the topology.)
}}}

''3. 重新啟動網路''
{{{
$ sudo /etc/init.d/networking restart
}}}

''4. 檢視修改後網路設定''
{{{
$ ifconfig -a
br0     Link encap:Ethernet  HWaddr 00:09:73:d7:ae:5b  
          inet addr:140.137.214.251  Bcast:140.137.214.255  Mask:255.255.255.0
          inet6 addr: fe80::209:73ff:fed7:ae5b/64 Scope:Link
          UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
          RX packets:31 errors:0 dropped:0 overruns:0 frame:0
          TX packets:46 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:0 
          RX bytes:2391 (2.3 KB)  TX bytes:6846 (6.8 KB)

eth0    Link encap:Ethernet  HWaddr 00:09:73:d7:ae:5b  
          inet6 addr: fe80::209:73ff:fed7:ae5b/64 Scope:Link
          UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
          RX packets:44 errors:0 dropped:0 overruns:0 frame:0
          TX packets:47 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:1000 
          RX bytes:4245 (4.2 KB)  TX bytes:6946 (6.9 KB)
          Interrupt:16 Base address:0xe800 

lo        Link encap:Local Loopback  
          inet addr:127.0.0.1  Mask:255.0.0.0
          inet6 addr: ::1/128 Scope:Host
          UP LOOPBACK RUNNING  MTU:16436  Metric:1
          RX packets:20 errors:0 dropped:0 overruns:0 frame:0
          TX packets:20 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:0 
          RX bytes:1200 (1.2 KB)  TX bytes:1200 (1.2 KB)

virbr0   Link encap:Ethernet  HWaddr 2a:7e:20:bd:b6:da  
          inet addr:192.168.122.1  Bcast:192.168.122.255  Mask:255.255.255.0
          inet6 addr: fe80::287e:20ff:febd:b6da/64 Scope:Link
          UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
          RX packets:0 errors:0 dropped:0 overruns:0 frame:0
          TX packets:45 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:0 
          RX bytes:0 (0.0 B)  TX bytes:9288 (9.2 KB)
}}}

''[註]'' br0 會取代 eth0 網路介面

The new bridge interface should now be up and running. The ''brctl'' provides useful information about the state of the bridge, controls which interfaces are part of the bridge, etc. See ''man brctl'' for more information. 

''檢視 Bridge 資訊''
{{{
$ brctl show
bridge name     bridge id               STP enabled     interfaces
br0             8000.00097359af5b       no              eth0
                                                        vnet0
                                                        vnet1
                                                        vnet2
virbr0          8000.000000000000       yes
virbr1          8000.000000000000       yes
student@U104-Desktop:~$
}}}

''認識 TUN/TAP 核心虛擬網路裝置''
{{{
In computer networking, TUN and TAP are virtual network kernel devices. They are network devices that are supported entirely in software, which is different from ordinary network devices that are backed up by hardware network adapters.

TAP (as in network tap) simulates an Ethernet device and it operates with layer 2 packets such as Ethernet frames. TUN (as in network TUNnel) simulates a network layer device and it operates with layer 3 packets such as IP packets. TAP is used to create a network bridge, while TUN is used with routing.

Packets sent by an operating system via a TUN/TAP device are delivered to a user-space program that attaches itself to the device. A user-space program may also pass packets into a TUN/TAP device. In this case TUN/TAP device delivers (or "injects") these packets to the operating system network stack thus emulating their reception from an external source.
}}}
<<toBalaNotes "bridge">>

{{item1{新增虛擬主機 Bridge 網路介面}}}

[img[img/kvm/VMBridge01.png]]

[img[img/kvm/VMBridge02.png]]

[img[img/kvm/VMBridge03.png]]

[img[img/kvm/VMBridge04.png]]

<<toBalaNotes "vmbridge">>

///%bridge
//%/

///%vmbridge
//%/
''參考文章''
1. VYATTA Linux Router - Part 1
http://jhlug.org/wiki/index.php/VYATTA_Linux_Router_-_Part_1

Brocade Vyatta 官方網址 : http://www.brocade.com/products/all/network-functions-virtualization/index.page
Vyatta 社群 : http://www.vyatta.org/

{{item1{Vyatta.com (商業版) has moved}}}

Vyatta is now a ''Brocade company''.
You are being redirected to Brocade.com, where you can find the latest information and support options for Brocade Vyatta vRouters and explore the full portfolio of Brocade products and solutions.

{{item1{虛擬網路裝置 - Vyatta}}}

以 Cisco 用戶為目標,企圖瓜分數十億路由器市場,開放源碼網路商 Vyatta 釋出新版開放源碼路由軟體。一般的 x86 硬體,經由這套社群支援以 Debian 為基礎的 Linux 散佈套件,搖身一變成為企業路由器、防火牆與 VPN 設備。Vyatta 社群版 3(Vyatta Community Edition 3)新功能包括 IPSec VPN、多重鏈結 PPP(multilink PPP)、邊界閘道協定(border gateway protocol,BGP)。

前 MontaVista 副總裁 Kelly Herrell 領軍的 Vyatta,旗艦產品為一套商業支援的 Linux 散佈套件,專門為整合型路由、防火牆與 VPN 經過最佳化,可預先安裝在 Dell PowerEdge 伺服器硬體上,或以軟體形式銷售。運用 x86 硬體,取代來自 Cisco 或 Juniper 昂貴的單一廠商硬體設計,第二級網際網路服務商(ISP)與擁有大型網路的企業藉此可節省成本並提高彈性。

Vyatta 策略副總裁 Dave Roberts 表示,Vyatta 的簽訂版本與社群版本,雖然版本號相同,但功能互異。VC3 簽訂版本的版本號將為 2.3,預計在社群版本釋出後數週推出。雖然簽訂版與社群版都可免費自 Vyatta 取得,不過 Vyatta 僅提供完整商業支援給簽訂版用戶。Roberts 表示,社群版的步調較快,提供支援的版本相對較為保守。

VC3 和 VC2 相同,都是以標準 Debian Linux 為基礎,Debian 約 1 萬 9 千種 x86 套件可提供用戶在系統架設上所需。Roberts 表示,他們只映射部份散佈套件,其他元件則會請用戶從 ftp.debian.org 取得。

多重鏈結 PPP 為最重要的新增功能。企業運用該功能,可彙整多條如 T1 等較小的租用連線。Roberts 表示大多數 ISP 都支援多重鏈結連線。VC3 支援包括 VMWare 與 Xen 在內的虛擬化技術,藉由此一功能,部門辦公室可將基礎架構統整到單一裝置中。

SSL-VPN 並未在 Vyatta 社群版或簽訂版本支援功能之列。Roberts 表示,由於以 Debian Linux 為基礎,OpenVPN SSL-VPN 軟體可由用戶自行安裝到執行 Vyatta 的路由器中。但由於無法整合到 Vyatta 核心管理架構中,因此這種做法其實並不理想。

預計在 2008 年 Vyatta 的下一次釋出之後,可望出現更為開放的架構。Roberts 表示,他們計畫讓用戶用更簡單的方式操作系統,而無須處理原始碼層次的問題。用戶可以自行整合所需功能,Vyatta 平台的創新將不單單來自於 Vyatta,而是整個社群。

Herrell 表示,該公司開放源碼網路解決方案的效能與可靠度,已經在大型關鍵網路上獲得認可。Vyatta 可以部署在刀鋒伺服器(blade server)、專屬裝置,或使用 VMWare 與 Xen 的虛擬機器中。

對於 Vyatta 而言,由於路由器市場近乎完全商品化,技術並非開放源碼廠商可以仰賴的競爭基礎或優勢。正如 Roberts 表示,路由、防火牆、VPN 等都是相當成熟的技術,其創新全盛期在 90 年代初期。目前各家的網路建構技術基本上大同小異,真正有所競爭的將是廠商品牌與服務。

@@color:red;Vyatta is a Debian based Linux distribution. Pronounced vee-AH-tah: ancient Sanskrit and means "open."@@

{{item1{Vyatta 514 Appliance}}}

[img[img/vyatta/Vyatta_514_Appliance.PNG]]

1GHz VIA C7 CPU
512MB RAM (upgradeable to 1GB)
2GB Compact Flash
4 Ethernet Channels (eth0 to eth3)
PCI-32 expansion slot for WAN card
1 RS232 Console Port
2 USB ports, for alternative boot devices
1.97" (50mm) Height
8.86" (225mm) Width
8.07" (205mm) Depth

''Hardware First Impressions''

[img[img/vyatta/Vyatta_514_Top.JPG]]

[img[img/vyatta/Vyatta_514_Bottom.JPG]] 

<<toBalaNotes "1">>
{{item1{開始安裝 Vyatta}}}

下載安裝光碟 : http://www.vyatta.org/downloads

''1. 啟動 虛擬工廠 程式, 選擇 新增原型虛擬主機 (newvm.sh)''

[img[img/vyatta/vyattains01.png]]

''2. 登入畫面, 輸入 vyatta/vyatta''

[img[img/vyatta/vyattains02.png]]

''3. 輸入 install system 命令, 開始將 Vyatta 系統安裝至硬碟''

The wizard opens and you will go through the installation process:
Would you like to continue? (Yes/No) [YES]: ''Enter''
Partition (Auto/Union/Parted/Skip) [Auto]: ''Enter''
Install the image on? [sda]: ''Enter''
This will destroy all data on /dev/sda.
Continue? (Yes/No) [No]: ''Yes''
How big of root partition should I create? (1000MB – 1074MB) [1074]MB: ''Enter''
I found the following configuration files
/opt/vyatta/etc/config/config.boot

Which one should I copy to sda? [/opt/vyatta/etc/config/config.boot] ''Enter''
Enter password for administrator account
Enter vyatta password: ''student''
Retype vyatta password: ''student''
Which drive should GRUB modify the boot partition on? [sda]: ''Enter''
Done!


''4. 顯示系統版本資訊''
{{{
vyatta@vyatta:~$ show version
Version:      VC6.1-2010.08.20
Description:  Vyatta Core 6.1 2010.08.20
Copyright:    2006-2010 Vyatta, Inc.
Built by:     autobuild@vyatta.com
Built on:     Fri Aug 20 04:27:08 UTC 2010
Build ID:     1008200429-170b446
Boot via:     image
Uptime:       15:02:36 up 5 min,  1 user,  load average: 0.00, 0.01, 0.00
}}}

''5. 關機''
{{{
$ sudo shutdown -h now
}}}
<<toBalaNotes "2">>

{{item1{送出虛擬主機}}}

[img[img/vyatta/vyattacp01.png]]

[img[img/vyatta/vyattacp02.png]]

{{item1{建置虛擬主機}}}

[img[img/vyatta/vyattavm01.png]]

[img[img/vyatta/vyattavm02.png]]

[img[img/vyatta/vyattavm03.png]]

[img[img/vyatta/vyattavm04.png]]

[img[img/vyatta/vyattavm05.png]]

{{item1{新增內部網路卡}}}

[img[img/vyatta/vyattavm06.png]]

[img[img/vyatta/vyattavm07.png]]

<<toBalaNotes "3">>

///%1
//%/

///%2
//%/

///%3
//%/
{{item1{下載與安裝 iLab 雲中虛擬系統平台}}}

1. 下載網址
{{{
$ cd 
$ wget http://www.tobala.net/download/iLab.zip
}}}

2. 將 iLab 雲中虛擬系統平台, 解壓縮至家目錄
{{{
$ unzip iLab.zip
}}}
{{item1{建立 iLab 雲中虛擬教學模組}}}

''進入 iLab 目錄''
{{{
$ cd iLab
}}}

''方法 1 : 詢問式建立 (網路與電腦)''
{{{
$ sudo ./labcmd.sh create Lab101

=> 開始建立 Lab101/ 虛擬網路 (Virtual Network)

建立 HUB88 虛擬網路 (Hub) ? (y/n) y
啟動 HUB88 虛擬網路成功

建立 HUB100 虛擬網路 (Hub) ? (y/n) y
啟動 HUB100 虛擬網路成功

建立 HUB660 虛擬網路 (Hub) ? (y/n) y
啟動 HUB660 虛擬網路成功

建立 HUB661 虛擬網路 (Hub) ? (y/n) y
啟動 HUB661 虛擬網路成功

=> 開始建立 Lab101/ 虛擬電腦 (Virtual Machine)

建立 GW100 虛擬電腦 ? (y/n) y
複製 GW100.vmdk 檔案 ... 成功
建立 GW100 虛擬電腦完成

建立 R88 虛擬電腦 ? (y/n) y
複製 R88.vmdk 檔案 ... 成功
建立 R88 虛擬電腦完成

建立 TC88 虛擬電腦 ? (y/n) y
複製 TSC456.iso 檔案 ... 成功
建立 TC88 虛擬電腦完成

建立 R660 虛擬電腦 ? (y/n) y
複製 R660.vmdk 檔案 ... 成功
建立 R660 虛擬電腦完成

建立 TC660 虛擬電腦 ? (y/n) y
TSC456.iso 檔案已存在
建立 TC660 虛擬電腦完成
}}}

''方法 2 : 批次快速建立''
{{{
$ sudo ./labcmd.sh create -f Lab101

=> 開始建立 Lab101/ 虛擬網路 (Virtual Network)

啟動 HUB88 虛擬網路成功

啟動 HUB100 虛擬網路成功

啟動 HUB660 虛擬網路成功

啟動 HUB661 虛擬網路成功

=> 開始建立 Lab101/ 虛擬電腦 (Virtual Machine)

複製 GW100.vmdk 檔案 ... 成功
建立 GW100 虛擬電腦完成

複製 R88.vmdk 檔案 ... 成功
建立 R88 虛擬電腦完成

複製 TSC456.iso 檔案 ... 成功
建立 TC88 虛擬電腦完成

複製 R660.vmdk 檔案 ... 成功
建立 R660 虛擬電腦完成

TSC456.iso 檔案已存在
建立 TC660 虛擬電腦完成
}}}
<<toBalaNotes "1">>
{{item1{啟動 Lab101 虛擬教學模組}}}
{{{
$ sudo ./labcmd.sh start Lab101

關閉所有互斥虛擬電腦
-------------------------
GWVT100 不存在
RVT88 不存在
RVT660 不存在
GWFW100 不存在
RFW88 不存在
RFW660 不存在

啟動 Lab101 所有虛擬電腦
-----------------------------
GW100 啟動中 .... 成功
R88 啟動中 .... 成功
TC88 設定手動啟動
R660 啟動中 .... 成功
TC660 設定手動啟動
}}}

{{item1{檢視 Lab101 狀態}}}
{{{
$ sudo ./labcmd.sh list

Lab101:
 GW100 執行中
 R660 執行中
 R88 執行中
 TC660 關機
 TC88 關機
}}}

{{item1{啟動單一虛擬電腦}}}
{{{
$ sudo ./labcmd.sh start Lab101 TC88

啟動 Lab101 單一虛擬電腦
-----------------------------
TC88 啟動中 .... 成功
}}}

{{item1{登入 TC88 - 視窗模式}}}
{{{
$ sudo virt-viewer TC88
}}}

{{item1{登入 R88 - Console 模式}}}
{{{
$ sudo virsh console R88
}}}

''[註]'' 登入帳號為 root, 密碼為 student, 按 "ctrl + ]" 脫離  Console 模式

<<toBalaNotes "2">>
{{item1{關閉 Lab101 虛擬教學模組}}}
{{{
$ sudo ./labcmd.sh stop Lab101/

關閉 Lab101/ 所有虛擬電腦
-----------------------------
GW100 關機中 ....  shutdown 成功
R88 關機中 ....  shutdown 成功
TC88 執行中(設定手動關機)
R660 關機中 ....  shutdown 成功
TC660 已關機
}}}

{{item1{檢視 Lab101 狀態}}}
{{{
$ sudo ./labcmd.sh list

Lab101:
 GW100 關機
 R660 關機
 R88 關機
 TC660 關機
 TC88 執行中
}}}

{{item1{關閉單一虛擬電腦}}}
{{{
$ sudo ./labcmd.sh stop Lab101 TC88

關閉 Lab101/ 單一虛擬電腦
-----------------------------
TC88 關閉中 .... 成功
}}}
<<toBalaNotes "3">>
{{item1{刪除 Lab101 虛擬教學模組}}}

''方法 1 : 詢問式刪除''
{{{
$ sudo ./labcmd.sh delete Lab101

=> 開始刪除 Lab101/ 虛擬網路 (Virtual Network)

刪除 HUB88 虛擬網路 (Hub) ? (y/n) y
刪除 HUB88 虛擬網路完成

刪除 HUB100 虛擬網路 (Hub) ? (y/n) y
刪除 HUB100 虛擬網路完成

刪除 HUB660 虛擬網路 (Hub) ? (y/n) y
刪除 HUB660 虛擬網路完成

刪除 HUB661 虛擬網路 (Hub) ? (y/n) y
刪除 HUB661 虛擬網路完成

=> 開始刪除 Lab101/ 虛擬電腦 (Virtual Machine)

刪除 GW100 虛擬電腦 ? (y/n) y
刪除 GW100 虛擬電腦成功
刪除 GW100 虛擬電腦硬碟檔 ? (y/n) y
刪除 GW100.vmdk 虛擬電腦硬碟檔成功

刪除 R88 虛擬電腦 ? (y/n) y
刪除 R88 虛擬電腦成功
刪除 R88 虛擬電腦硬碟檔 ? (y/n) y
刪除 R88.vmdk 虛擬電腦硬碟檔成功

刪除 TC88 虛擬電腦 ? (y/n) y
刪除 TC88 虛擬電腦成功
刪除 TC88 虛擬電腦硬碟檔 ? (y/n) y
刪除 TSC456.iso 虛擬電腦硬碟檔成功

刪除 R660 虛擬電腦 ? (y/n) y
刪除 R660 虛擬電腦成功
刪除 R660 虛擬電腦硬碟檔 ? (y/n) y
刪除 R660.vmdk 虛擬電腦硬碟檔成功

刪除 TC660 虛擬電腦 ? (y/n) y
刪除 TC660 虛擬電腦成功
刪除 TC660 虛擬電腦硬碟檔 ? (y/n) y
刪除 TSC456.iso 虛擬電腦硬碟檔成功
}}}

''方法 2 : 批次快速刪除''
{{{
$ sudo ./labcmd.sh delete -f Lab101

=> 開始刪除 Lab101/ 虛擬網路 (Virtual Network)

刪除 HUB88 虛擬網路完成

刪除 HUB100 虛擬網路完成

刪除 HUB660 虛擬網路完成

刪除 HUB661 虛擬網路完成

=> 開始刪除 Lab101/ 虛擬電腦 (Virtual Machine)

刪除 GW100 虛擬電腦成功
刪除 GW100.vmdk 虛擬電腦硬碟檔成功

刪除 R88 虛擬電腦成功
刪除 R88.vmdk 虛擬電腦硬碟檔成功

刪除 TC88 虛擬電腦成功
刪除 TSC456.iso 虛擬電腦硬碟檔成功

刪除 R660 虛擬電腦成功
刪除 R660.vmdk 虛擬電腦硬碟檔成功

刪除 TC660 虛擬電腦成功
刪除 TSC456.iso 虛擬電腦硬碟檔成功
}}}

<<toBalaNotes "4">>


///%1
//%/

///%2
//%/

///%3
//%/

///%4
//%/
{{item1{匯出虛擬網路設定檔}}}

''1. 檢視虛擬網路''
{{{
$ virsh net-list --all
名稱               狀態     自動啟動
-----------------------------------------
default              啟用     yes
HUB66                啟用     yes
}}}

''2. 匯出已存在虛擬網路設定檔''
{{{
$ virsh net-dumpxml HUB66
<network>
  <name>HUB66</name>
  <uuid>3c355699-78c9-2263-8739-2aa67d298ef7</uuid>
  <bridge name='virbr1' stp='on' delay='0' />
  <ip address='192.168.66.1' netmask='255.255.255.0'>
    <dhcp>
      <range start='192.168.66.128' end='192.168.66.250' />
    </dhcp>
  </ip>
</network>

$ virsh net-dumpxml HUB66 > HUB66.xml
}}}

''3. 取消虛擬網路定義''
{{{
$ virsh net-undefine HUB66
錯誤: 無法為網路 HUB66 取消定義
錯誤: internal error network is still active

$ virsh net-destroy HUB66                       (net-destroy 是停止虛擬網路, 而不是刪除虛擬網路)
網路 HUB66 已經刪除

$ virsh net-list --all
名稱               狀態     自動啟動
-----------------------------------------
default              啟用     yes
HUB66                非啟用中 yes

$ virsh net-undefine HUB66
網路 HUB66 已經取消定義

$ virsh net-list --all
名稱               狀態     自動啟動
-----------------------------------------
default              啟用     yes
}}}

''4. 重新定義虛擬網路''
{{{
$ virsh net-define HUB66.xml
網路 HUB66 定義自 HUB66.xml
}}}

''5. 啟動虛擬網路''
{{{
$ virsh net-start HUB66
}}}

<<toBalaNotes "1">>

{{item1{根據 MAC 位址配置 IP}}}

{{{
<ip address="192.168.122.1" netmask="255.255.255.0">
    <dhcp>
        <range start="192.168.122.100" end="192.168.122.254" />
        <host mac="00:16:3e:77:e2:ed" name="foo.example.com" ip="192.168.122.10" />
        <host mac="00:16:3e:3e:a9:1a" name="bar.example.com" ip="192.168.122.11" />
    </dhcp>
</ip>
}}}
<<toBalaNotes "2">>

{{item1{使用 nano 作為 virsh edit 命令的編輯器}}}
{{{
$ EDITOR=nano virsh edit UD104SJSD
}}}
///%1
//%/

///%2
//%/
<<forEachTiddler
    script '
        window.fetItemsPerPage = 20;

        function getHeader(context,count) {
            if (!window.fetStartIndex || window.fetStartIndex < 0) 
                window.fetStartIndex = 0;

            // ensure not to page behind the last page
            if (window.fetStartIndex >= count)
                window.fetStartIndex = Math.min(Math.max(window.fetStartIndex-window.fetItemsPerPage,0),count-1);

            createTiddlyButton(context.place,"<",null,
                    function(e) {
                        window.fetStartIndex -= window.fetItemsPerPage;
                        story.refreshTiddler(context.viewerTiddler.title,null,true);
                    });
            createTiddlyButton(context.place,">",null,
                    function(e) {
                        window.fetStartIndex += window.fetItemsPerPage;
                        story.refreshTiddler(context.viewerTiddler.title,null,true);
                    });

            var startNo = window.fetStartIndex+1;
            var endNo = Math.min(count,window.fetStartIndex+window.fetItemsPerPage);

            return "("+startNo+" - "+endNo+ " of "+ count + " items)\n";
        }
    '

    write
            '(index >= window.fetStartIndex) && (index < window.fetStartIndex + 20) ? "* [["+tiddler.title+"]]\n" : ""'

        begin
            'getHeader(context,count)'
>>

// /%
With a little scripting you can use the ~ForEachTiddlerPlugin to display the result "by pages". I.e. you don't display the full result list, but (e.g.) 10 at a time. 
Using a "pagewise" display may be useful when the result may get very large. This way you avoid scrolling the window to see the result. It also speeds up things since less items need to be display at a time.

''Code''
{{{
<<forEachTiddler
    script '
        window.fetItemsPerPage = 10;

        function getHeader(context,count) {
            if (!window.fetStartIndex || window.fetStartIndex < 0) 
                window.fetStartIndex = 0;

            // ensure not to page behind the last page
            if (window.fetStartIndex >= count)
                window.fetStartIndex = Math.min(Math.max(window.fetStartIndex-window.fetItemsPerPage,0),count-1);

            createTiddlyButton(context.place,"<",null,
                    function(e) {
                        window.fetStartIndex -= window.fetItemsPerPage;
                        story.refreshTiddler(context.viewerTiddler.title,null,true);
                    });
            createTiddlyButton(context.place,">",null,
                    function(e) {
                        window.fetStartIndex += window.fetItemsPerPage;
                        story.refreshTiddler(context.viewerTiddler.title,null,true);
                    });

            var startNo = window.fetStartIndex+1;
            var endNo = Math.min(count,window.fetStartIndex+window.fetItemsPerPage);

            return "("+startNo+" - "+endNo+ " of "+ count + " items)\n";
        }
    '

    write
            '(index >= window.fetStartIndex) && (index < window.fetStartIndex + 10) ? "* [["+tiddler.title+"]]\n" : ""'

        begin
            'getHeader(context,count)'
>>
}}}
// %/

{{item1{RDP 運作架構}}}

[img[img/kvm/RDP_Network.jpg]]

<<toBalaNotes "1">>

{{item1{Virt-Manager VNC 運作架構}}}

[img[img/kvm/VNC_Network.jpg]]

<<toBalaNotes "2">>

{{item1{NeatX 運作架構}}}

[img[img/kvm/NX_Network.jpg]]

<<toBalaNotes "3">>
///%1
//%/

///%2
//%/

///%3
//%/
感謝你使用土芭樂知識王 - 學習筆記本, 首先請你根據以下設定程序, 設定此筆記本的版面
__{{item1{修改筆記本主標題 (SiteTitle) 及 次標題 (SiteSubTitle)}}}__
1. 展開左邊選單中的 ''[知識王管理]'' 項目
2. 點選 ''[版面管理員]'', 然後設定平台標題文字

<<toBalaSWF2 "movie/KMKTitle.swf" "820" "610" "教學影片 - 修改筆記本標題文字">>

__{{item1{產生主選單樹狀文章架構}}}__
1. 展開左邊選單中的 ''[知識王管理]'' 項目
2. 點選 ''[版面管理員]'', 然後設定資訊樹架構

<<toBalaSWF2 "movie/KMKTree.swf" "820" "610" "教學影片 - 產生主選單樹狀文章架構">>

''[注意]'' 執行完上述二項設定, 記得點選工具列中的 ''[儲存變更]'' 按鈕, 儲存你的設定並重新載入

<<toBalaNotes "setup">>


///%setup
//%/
{{item1{WebKit 安裝}}}
本文網址 : https://help.ubuntu.com/community/WebKit

''1. 加入 WebKit 套件庫''
{{{
# add-apt-repository ppa:webkit-team
Executing: gpg --ignore-time-conflict --no-options --no-default-keyring --secret-keyring /etc/apt/secring.gpg --trustdb-name /etc/apt/trustdb.gpg --keyring /etc/apt/trusted.gpg --primary-keyring /etc/apt/trusted.gpg --keyserver keyserver.ubuntu.com --recv 612D9FE65C733A79BB2AB07D991E6CF92D9A3C5B
gpg: 正在請求金鑰 2D9A3C5B 自 hkp 伺服器 keyserver.ubuntu.com
gpg: 金鑰 2D9A3C5B: 公鑰 "Launchpad PPA for WebKit Team" 已匯入
gpg: 處理總量: 1
gpg:               已匯入: 1  (RSA: 1)

# apt-get update
}}}

''2. 安裝編譯所需套件''
{{{
$ apt-get install subversion gtk-doc-tools autoconf automake libtool libgtk2.0-dev libpango1.0-dev libicu-dev libxslt-dev libsoup2.4-dev libsqlite3-dev gperf bison flex libjpeg62-dev libpng12-dev libxt-dev autotools-dev libgstreamer-plugins-base0.10-dev libenchant-dev libgail-dev
正在讀取套件清單... 完成
正在重建相依關係          
正在讀取狀態資料... 完成
autoconf 已經是最新版本了。
autoconf 被設定為手動安裝。
automake 已經是最新版本了。
automake 被設定為手動安裝。
libgtk2.0-dev 已經是最新版本了。
libpango1.0-dev 已經是最新版本了。
libpango1.0-dev 被設定為手動安裝。
注意,選擇了以 libxslt1-dev 替代 libxslt-dev
libjpeg62-dev 已經是最新版本了。
libjpeg62-dev 被設定為手動安裝。
libpng12-dev 已經是最新版本了。
libpng12-dev 被設定為手動安裝。
autotools-dev 已經是最新版本了。
autotools-dev 被設定為手動安裝。
下列的額外套件將被安裝:
  docbook docbook-dsssl docbook-to-man docbook-xsl docbook-xsl-doc-html
  gnome-common jade libapr1 libaprutil1 libgstreamer0.10-dev libltdl-dev
  libsoup2.4-1 libsp1c2 libsvn1 libxslt1-dev sp
建議套件:
  bison-doc psgml docbook-defguide jadetex docbook-dsssl-doc libsaxon-java
  libxalan2-java docbook-xsl-saxon fop xalan dbtoepub libgail-doc
  gstreamer0.10-doc icu-doc libtool-doc libsoup2.4-doc sqlite3-doc gfortran
  fortran95-compiler gcj subversion-tools db4.8-util
下列【新】套件將會被安裝:
  bison docbook docbook-dsssl docbook-to-man docbook-xsl docbook-xsl-doc-html
  flex gnome-common gperf gtk-doc-tools jade libapr1 libaprutil1
  libenchant-dev libgail-dev libgstreamer-plugins-base0.10-dev
  libgstreamer0.10-dev libicu-dev libltdl-dev libsoup2.4-dev libsp1c2
  libsqlite3-dev libsvn1 libtool libxslt1-dev libxt-dev sp subversion
下列套件將會被升級:
  libsoup2.4-1
升級 1 個,新安裝 28 個,移除 0 個,有 16 個未被升級。
需要下載 14.1MB 的套件檔。
此操作完成之後,會多佔用 69.0MB 的磁碟空間。
是否繼續進行 [Y/n]?y

}}}

''3. 下載 WebKit 原始碼 (請耐心等待)''
{{{
# svn checkout http://svn.webkit.org/repository/webkit/trunk ~/src/WebKit
}}}

''4. 編譯環境檢查''
{{{
$ cd WebKit-r71367

$ ./autogen.sh --prefix=/usr
configure.ac:81: installing `autotools/compile'
configure.ac:21: installing `autotools/config.guess'
configure.ac:21: installing `autotools/config.sub'
configure.ac:41: installing `autotools/install-sh'
configure.ac:41: installing `autotools/missing'
GNUmakefile.am: installing `autotools/depcomp'
checking build system type... i686-pc-linux-gnu
                               :
WebKit was configured with the following options:

Build configuration:
 Enable debugging (slow)                                  : no
 Enable GCC build optimization                            : yes
 Code coverage support                                    : no
 Unicode backend                                          : icu
 Font backend                                             : freetype
 Optimized memory allocator                               : yes
Features:
 3D Transforms                                            : no
 Blob support                                             : no
 Directory upload                                         : no
 Fast Mobile Scrolling                                    : no
 JIT compilation                                          : yes
 Filters support                                          : yes
 Geolocation support                                      : no
 JavaScript debugger/profiler support                     : yes
 MathML support                                           : no
 HTML5 offline web applications support                   : yes
 HTML5 channel messaging support                          : yes
 HTML5 client-side session and persistent storage support : yes
 HTML5 client-side database storage support               : yes
 HTML5 FileSystem API support                             : no
 HTML5 ruby support                                       : yes
 HTML5 sandboxed iframe support                           : yes
 HTML5 server-sent events support                         : yes
 HTML5 video element support                              : yes
 Icon database support                                    : yes
 Image resizer support                                    : no
 Opcode stats                                             : no
 SharedWorkers support                                    : yes
 Speech input support                                     : no
 SVG support                                              : yes
 SVG animation support                                    : yes
 SVG fonts support                                        : yes
 SVG foreign object support                               : yes
 SVG as image support                                     : yes
 SVG use element support                                  : yes
 WML support                                              : no
 Web Sockets support                                      : yes
 Web Timing support                                       : no
 Web Workers support                                      : yes
 XHTML-MP support                                         : no
 XPATH support                                            : yes
 XSLT support                                             : yes
GTK+ configuration:
 GTK+ version                                             : 2.0
 GDK target                                               : x11
 Hildon UI extensions                                     : no
 Introspection support                                    : no
}}}

''5. 開始編譯''
If your ./autogen.sh command runs without any hitches you are ready to compile WebKit. Issue the following command in your terminal and grab a beer:
{{{
$ make
}}}
To install run:
{{{
$ sudo make install
}}}
<<toBalaNotes "1">>

{{item1{WebKit 測試}}}
''1. WebKit 簡易瀏覽器''
Assuming no errors occurred during the build you can now test WebKit. Launch the demo browser by issuing the following command in your terminal:
{{{
~/WebKit/Programs/GtkLauncher
}}}

''2. 小工具展示網站''
http://webkit.org/demos/

<<toBalaNotes "2">>



///%1
//%/

///%2
//%/
''參考網站''
1. Windows VirtIO Drivers (有提供 Windows 7)
http://www.linux-kvm.org/page/WindowsGuestDrivers/Download_Drivers
2. Using KVM directly
https://help.ubuntu.com/community/KVM/Directly
3. QEMU – Emulating Raspberry Pi the easy way
http://xecdesign.com/qemu-emulating-raspberry-pi-the-easy-way/
''3. qemu-kvm introduction (重要)''
http://alexander.holbreich.org/2013/03/qemu-kvm-introduction/
4. ''Linux KVM 魔法石'' - Tiny Server Core 安裝與啟動 (使用核心參數開機)
http://linuxkvm.blogspot.tw/2013/01/linux-kvm-tiny-server-core.html

{{item1{初試 Tiny Server Core 虛擬電腦}}}
''Tiny Server Core (TSC)'' 是將 ''[[Tiny Core Linux|http://distro.ibiblio.org/tinycorelinux/welcome.html]]'' 這發行套件的 Core 版本 (8M) 重製而成, TSC 目前提供二種啟動模式, 分別是 Live CD 及 Kernel. 本文主要以 Live CD 運作模式, 為你說明它的基本操作. 如需了解如何製作 TSC, 請參考 ''[[重製 Tiny Core Linux 光碟|http://linuxkvm.blogspot.tw/2012/06/tiny-server-core.html]]'' 這篇文章

''1. 下載 Tiny Server Core Live CD''

網址 : http://tobala.net/download/tsc32.iso

''2. 啟動 Tiny Server Core 虛擬電腦''
Tiny Server Core 這套 Linux 系統, 可由 Live CD 方式啟動, 啟動命令如下 : 
{{{
$ kvm -m 128 -cdrom tsc32.iso -boot d
}}}

''[註]'' -boot d 代表從 光碟開機

事實上 kvm 命令只是一個連接檔, 可由以下命令得知 :
{{{
$ ll /usr/bin/kvm
lrwxrwxrwx 1 root root 18 2011-01-16 20:53 /usr/bin/kvm -> qemu-system-x86_64*
}}}

''3. 檢視 Tiny Server Core 網路設定''

[img[img/tsc/tsc32net.png]]

''4. 網路測通''
QEMU 內建網路系統會阻擋 ICMP 封包, 而其他通訊協定可運作 (HTTP, Telnet, SSH,...)

[img[img/tsc/tsc32telnet.png]]

''5. 關機''
{{{
$ sudo poweroff
}}}
<<toBalaNotes "1">>
{{item1{直接由 Linux 核心檔 及 RAM 磁碟檔啟動}}}

''qemu-kvm 啟動參數''
{{{
-kernel bzImage use 'bzImage' as kernel image
-append cmdline use 'cmdline' as kernel command line
-initrd file use 'file' as initial ram disk
}}}

''1. 啟動 Tiny Server Core 虛擬電腦''
{{{
$ cd 

$ kvm -m 512  -kernel tsc/vmlinuz32 -initrd tsc/tsc532.gz -nographic -curses   
}}}

啟動後畫面, 如下 :

[img[img/tsc/tsckernel01.png]]

''2. 透過核心參數設定 Tiny Server Core 虛擬電腦的 IP 位址''
{{{
$ kvm -m 512  -kernel tsc/vmlinuz32 -initrd tsc/tsc532.gz -nographic -curses  -append "ipv4=192.168.0.1:255.255.255.0:192.168.0.254:168.95.1.1"
}}}

啟動後畫面, 如下 :

[img[img/tsc/tsckernel02.png]]

<<toBalaNotes "2">>



///%1
//%/

///%2
//%/
''參考文章''
1. Networking with Android
http://www.ibm.com/developerworks/opensource/library/os-android-networking/

Android 是行動裝置的軟體套件,其中包括作業系統、中介軟體,以及主要應用程式。Android SDK 包含所有必要的工具和 API,以協助使用者開發適用於 Android 裝置的應用程式。

這個網站提供以 Android 平台為基礎的 Google 專案相關資訊,例如可擴充 Android 平台的外部程式庫、Android 應用程式、代管的服務和 API,以及 Android Developer Contest 等。網站上的所有內容皆是由 Google 提供,目的在於協助 Android 開發人員。

如果您需要的是 Android 的一般資訊,請造訪 http://www.android.com 網站。如果您有興趣開發 Android 裝置的應用程式,請造訪「Android 開發人員」網站,網址為 http://developer.android.com 

{{item1{下載 Android SDK}}}

下載網站 : http://developer.android.com/sdk/index.html

<<toBalaNotes "android">>




///%android
//%/
Android-x86 官方網址 : http://www.android-x86.org/

Android-x86 開源碼專案是由國內開源碼前輩 黃志偉先生 所成立, 此專案的目地是將 Android 系統運作在許多 x86 平台(小筆電, 桌上電腦, 平板電腦), 甚至還可在 Linux KVM 及 VirtualBox 虛擬平台中執行. Android-x86 可使沒有 Android 手機的開發人員, 一樣可研究 Android 系統, 可真造福許多人

This is a project to port Android open source project to x86 platform, formerly known as "patch hosting for android x86 support". The original plan is to host different patches for android x86 support from open source community. A few months after we created the project, we found out that we could do much more than just hosting patches. So we decide to create our code base to provide support on different x86 platforms, and set up a git server to host it.

This is an open source project licensed under Apache Public License 2.0.

<<toBalaNotes "1">>


///%1
//%/
''參考文章''
1. [[Part One: Nice URLs with Rewrite Rules and Virtual Hosts|http://blog.couch.io/post/443028592/whats-new-in-apache-couchdb-0-11-part-one-nice-urls]]
2. [[Part Two: Views; JOINs Redux, Raw Collation for Speed|http://blog.couch.io/post/446015664/whats-new-in-apache-couchdb-0-11-part-two-views]]
3. [[Part Three: New Features in Replication|http://blog.couch.io/post/468392274/whats-new-in-apache-couchdb-0-11-part-three-new]]
4. Create offline web applications on mobile and stationary devices with CouchDB
http://www.ibm.com/developerworks/web/library/wa-couchdb/

CouchDB 官方網站 : http://couchdb.apache.org/



''參考文章''
1. Setting up a Floppy-based Firewall with floppyfw
http://support.moonpoint.com/os/unix/linux/network/firewall/floppyfw-setup.php
2. Understanding Floppy Firewall
http://radagast.bglug.ca/how_to_build_a_firewall/understanding_floppyfw.html

{{item1{認識 FloppyFW}}}
官方網址 : http://www.zelow.no/floppyfw/

Linux 下的防火牆 (firewall) 從誕生到現在,防火牆主要經歷了四個發展階段:第一階段:基於路由器的防火牆;第二階段用戶化的防火牆工具套; 第三階段:建立在通用作業系統上的防火牆;第四階段:具有安全作業系統的防火牆。目前世界上大多數防火牆供應商提供的都是具有安全作業系統的軟硬體結合的防火牆,像著名的 NETEYE、NETSCREEN、TALENTIT 等。在 Linux 作業系統上的防火牆軟體也很多,有些是商用版本的防火牆,有的則是完全免費和公開源代碼的防火牆。大多數 Linux 教程都提到了如何在 Linux 平台中使用 IPCHAINS 來構築防火牆。設置和管理 Linux 作業系統中的防火牆是網路系統管理員的重要工作。

有沒有能隨身攜帶的,使用方便的 Linux 防火牆呢? 答案是有的,現在我就向大家介紹一種能裝在普通軟碟裏面的 Linux 防火牆。這套名字叫 floppyfw 的 Linux 防火牆能存放在一張普通的軟碟裏,並獨立的在 RAM 記憶體中運行。使用它能啟動電腦,利用 ipchains 過濾掉無用的 IP 包,還可以使用它來配置 IP 偽裝(IP masquerade),監視端口,通過它可以使用主機對其他網路中的電腦進行遠程式控制制。Floppyfw 功能十分強大,但是它運行所需要的硬體環境卻非常低,除了需要一張軟碟之外,只要 8MB 的記憶體就足夠了。

Floppyfw 需要的最低硬體設備如下: 最少 8MB 記憶體 3.5" 軟碟機, 顯示卡, 鍵盤, 顯示器

有的 Linux 系統中裝兩塊網卡,能使得 Floppyfw 正常工作,這就需要每一塊網卡的 IRQ 和記憶體地址都正確無誤。在 Linux 系統中配置雙網卡相信很多系統管理員都是輕車熟路的。

Floppyfw 支援以下的網卡。 3Com 3c509 NE2000 compatibles Tulip-based Intel EtherExpress PCI

''關於軟體:''
把 Floppyfw 做成一張可以引導的軟磁片是一件非常簡單的事情。不過你要首先到 http://www.zelow.no/floppyfw/download/ 把Floppyfw下載到電腦的硬盤上。Floppyfw 最新的版本應該是 1.0.5 或者更高,Floppyfw 是一個鏡像文件,可以使用 
{{{
# dd if=floppyfw-1.0.5.img of=/dev/fd0 bs=72k
}}}
這個命令把鏡像文件解壓並寫到準備好的軟碟上。

''關於設置:''

需要注意的是,一般的軟碟格式化以後都是 DOS(FAT) 的格式。為了能順利的啟動 Linux 系統,我們需要在這張軟碟上作一些修改。建議使用其他的電腦來修改這張軟碟,如果在 Linux 系統中使用 MTOOLS工 具修改則更好。

使用命令如下: $ cd /tmp $ mcopy a:config $ vi config $ mcopy config a:

如果你使用的是其他的作業系統,我想在 WINDOWS 中可以使用記事本進行修改。在軟碟中,我們可以看到 floppyfw 一共有 5 個文件: config (主配置文件) firewall.ini (過濾規則) modules.lst (附加的 ip_masq 模組) sysLinux.cfg (內核啟動參量) syslog.cfg (syslog 配置, 例如/etc/syslog.conf)

在一般情況下,我們不需要修改 sysLinux.cfg 或者 modules.lst 文件。我們主要的任務就是要修改 config 這個文件。為了簡單明瞭的說明問題,我在這裡不想過多的解釋 config 這個文件裏面的具體的配置清單,只是著重說明 config 文件末尾幾個重要的事項。

在(/bin/ash) 找到"OPEN_SHELL controls shell"這行文字,如果您的電腦的記憶體少於 12MB,把ONLY_8M設置成"Y"。USE_SYSLOG 能測定系統中 syslogd是 否運行,而 SYSLOG_FLAGS 則是判斷 syslogd 啟動的標誌。用戶可以根據自己的實際情況進行修改。

附錄:配置清單一,這是一個通過測試的標準配置清單。由於這個 Linux 系統中沒有提供 DHCP 服務,使用的靜態的 IP,所以僅供有相似服務的用戶提供參考。點擊這裡下載清單一

''關於過濾規則:''
現在,讓我們再來看看 firewall.ini 文件。沒有修改之前的 floppyfw 的 firewall.ini 文件默認設置了靜態的IP偽裝和拒絕一些固定端口的訪問。因為我們需要建立自己的防火牆,所以我們需要對 firewall.ini 文件進行修改。我們需要全面的設置過濾規則,關閉一些我們認為存在潛在危險的端口。

在這裡因為篇幅的關係我就不再講解如何設置 ipchains。如果您想知道更詳細的 ipchains 的配置方案和具體使用方法,推薦您參考以下的這個國外的 Linux 防火牆 ipchains 配置方案。

firewall.ini 的過濾規則的具體設置可以參考配置清單二(ftp://ftp.mfi.com/pub/sysadmin/2001/jan2001.tar.z),這是一個已經修改好了的配置。如果你對 Linux 的防火牆不太熟悉,那麼可以直接下載這個配置清單來進行參考或者直接使用。

清單二可以提供最基本的 DNS, SMTP, POP, NNTP, TELNET, SSH, FTP, HTTP, 和 WHOIS 服務,一般的客戶端電腦都可以通過安全的端口訪問網路和使用以上的服務。
 
''關於LOG''

一般的 Linux 系統中的 LOG 文件可不少,主要是記錄系統運行中的一些主要參數和記錄。上面已經說過了,syslog.cfg 就是一個管理和記錄 LOG 的文件。Floppyfw 能通過這個 syslog.cfg 文件記錄下 Linux 防火牆系統中的控制記錄,例如鍵盤錯誤,顯示器沒有安裝等資訊也被如是的記錄下來。這為今後系統管理員分析和解決系統問題提供了有利的依據。syslog.cfg 的設置也不難,首先把syslog.cfg設置成某台電腦的主記錄文件。例如,在Red Hat系統中,通過編輯 /etc/rc.d/init.d/syslog 可以達到目的。如果這台電腦的IP是192.168.1.2,那麼在 syslog.cfg則要配置成一致的IP。具體的配置清單可以參考"清單三"(ftp://ftp.mfi.com/pub/sysadmin/2001/jan2001.tar.z)

一旦你把前面三個主要的文件配置好了以後,那麼你就可以通過這張軟碟啟動 Linux 系統進行測試了。

<<toBalaNotes "1">>

{{item1{Why Squashfs LZMA?}}}
LZMA is one of the best compression algorithms and Squashfs is one of the best compressed filesystems available nowadays. This project combined both to make something even better. LZMA can compress much better than gzip, which was the only option in squashfs for years. Compressing to LZMA format is very slow, but this is not a problem as the squashfs filesystem needs to be compressed only once. Then it's only read million times; so what we care about is the speed of decompression. LZMA decompression is slower than gzip, but it's still very fast, about 10-20 MB/s on a 2GHz CPU. 

///%1
//%/
''參考文章''
1. 初探 Hadoop 開放原始碼平台環境 (很棒的文章)
http://www.runpc.com.tw/content/cloud_content.aspx?id=105318
2. 開發 Hadoop 雲端應用沒那麼難 
http://www.ithome.com.tw/itadm/article.php?c=58008
3. 微軟巨量資料策略轉向,全面支援 Hadoop 
http://www.ithome.com.tw/itadm/article.php?c=77576
4. Deriving new business insights with Big Data
http://www.ibm.com/developerworks/opensource/library/os-bigdata/index.html
5. 中華電信用 Hadoop 技術分析通話明細 
http://www.ithome.com.tw/itadm/article.php?c=68023
6. 台積電、聯電-剖析上億筆機臺記錄,尋找新世代製程優化關鍵 
http://www.ithome.com.tw/itadm/article.php?c=76526&s=2
7. 中華電信-了解千萬用戶消費行為,聚焦高價值顧客區隔服務 
http://www.ithome.com.tw/itadm/article.php?c=76526&s=3
8. 元大銀行-解決資料倉儲效能瓶頸,一次滿足未來擴充需求
http://www.ithome.com.tw/itadm/article.php?c=76526&s=4
9. HP推出首款 Hadoop 運算平臺
http://www.ithome.com.tw/itadm/article.php?c=74330

{{item1{Hadoop 簡介}}}
官方網址 : http://hadoop.apache.org/
Hadoop 是什麼?它本來是 Apache.org 在 Lucene下的一個專案,由 Doug Cutting 所開發,而在 Lucene 的發展歷史中,在搜尋引擎的應用上因 Google 的走紅,而使 Doug Cutting 也參考了由 Google 所發表的 MapReduce 和 GFS ( Google File System ) 的發表論文後 ( Google 是用這兩樣技術進行電腦的聯合運算,也就是雲端運算技術 ),在 Hadoop 專案中,將 MapReduce 及 GFS 進行了兩方面的實作,從這個開發原始碼的應用,使得大型機器在快速運算的能力延伸到了多台機器的聯合運算,並且利用分散式的架構概念及日漸成熟的 PC 運算,達到和大型主機類似的功能。

Hadoop 的專案圖象是一頭大象,在 Hadoop 的專案網頁裡有它功能的介紹:「Hadoop is a software platform that lets one easily write and run applications that process vast amount of data.」一開始就點明了它用來處理程式在大量檔案的處理。而這個 Hadoop 專案所要應付和解決的方案是什麼呢?在網頁的簡介下也說明的很清楚:

1. Hadoop can reliably store and process petabytes.用來處理大量的資料和儲存的能力。

2. It distributes the data and processing across clusters of commonly available computers.These clusters can number into the thousands of nodes.將資料和處理程序分散到可以使用的電腦上,並且這些電腦的數 量可以達到上千台之多。

3. By distributing the data, Hadoop can process it in parallel on the nodes where the data is located. This make it extremely  rapid. 借由分散檔案的處理,Hadoop 可以平行的運算這些檔案,並且使得檔案的處理 變的可以快速的回應。

4. Hadoop automatically maintains multiple copies of data and automatically redeploys computing tasks based on failures.這一個機制使得 Hadoop 成為可以信賴的運算和放置資料的平台,它可以將運算的程式 和放置的資料在每一個可以運行的 node 間進行複製和自動化的備份,可以避免執行中的程式或存放的資 料,因為電腦的硬體或系統的上的損壞而使程式消失或檔案損毀。

{{item1{Hadoop 計畫分割}}}
應該有些人注意到前一陣子 Hadoop 決定進行計畫分割,最主要的改變是將原本 Hadoop Core 這個計畫分割為三個計畫, Hadoop Common, HDFS 和 MapReduce. 為何要進行這樣的改變呢? 主要的原因是 Hadoop 越來越熱門了! 首先碰到問題是原先開發用的 mail listing 量暴增 , 每天都有數十個以上的 JIRA 訊息. 再來是計畫本身的 code base 也不斷的在長大 (300,000+ lines of Java) 另外就是之前有蠻多人很有興趣的部份,單獨使用 HDFS. 因此決定將原本的 Hadoop Core 分成三個計畫, 分別有各自的 svn repository , JIRA 和 mail lists. 原先使用者用的 core-user@hadoop.apache.org 更名為 common-user@, 另外對應產生了 mapreduce-user@ 和 hdfs-user@.

對於使用 Hadoop上面來說, 短時間內 Stable 版本(0.18,0.19,0.20)不會有影響, 到 0.20.1 仍然會是以 single project 的方式 release. 之後每個 project 會分別有有自己的 release , 使用上將會需要同時安裝數個 packages. 同時 Configure 上也需要注意, 從 0.20 開始應該已經要把單一的 hadoop-site.xml 分開為 mapred-site 和 hdfs-site , 0.21 版之後才可繼續使用. 至於 API 方面, 從 0.20 開始 MapReduce 已經導入新的 API 但是尚未完全使用 , 將會在 0.20.1 修復完成, 同時 0.20.1 release 之後也代表了你應該準備升級你原來的程式碼了.

{{item1{Hadoop 子專案}}}
Hadoop 的定位是用來處理與保存大量資料的雲端運算平台,目前屬於Apache頂層專案,在 Hadoop 中包含了最著名的分散式檔案系統(HDFS)、MapReduce 框架、儲存系統 (HBase)等元件,如圖3所示,以及根據 Hadoop 延伸發展的其他子專案:
●Core:一組用於分散式檔案系統和一般性I/O之用的元件和介面。
●Avro:提供高效能、跨語言以及可保存資料的RPC資料序列化系統。
●Pig:超大資料集的資料流語言以及執行環境,可在HDFS和MapReduce叢集環境中執行。
●ZooKeeper:分散式且高可用性的協調服務,可為建置分散式系統提供分散式鎖定等原始鎖定功能。
●Hive:分散式資料倉儲,透過Hive可管理存放於HDFS的資料,並提供根據SQL發展的查詢語言來查詢資料。
●Chukwa:分散式資料收集和分析系統,其會執行收集器以便在HDFS中儲存資料,且會使用MapReduce來產生報表。
 
{{item1{Cluster of machines running Hadoop at Yahoo!}}}

[img[img/380px-Yahoo-hadoop-cluster_OSCON_2007.jpg]]

{{item1{Hadoop 簡史}}}

 2003 年 2 月 : Google 撰寫第一個 MapReduce 程式庫。

 2003 年 10 月 : Google 發表 Google File System (GFS) 論文。

 2004 年 12 月 : Google 發表 MapReduce 論文。

 2005 年 7 月 : Doug Cutting 在 Nutch 開始採用 MapReduce 實作。

 2006 年 2 月 : Hadoop 程式碼從 Nutch 移至 Lucene 子專案。

 2006 年 11 月 : Google 發表 Bigtable 論文。

 2007 年 2 月 : Mike Cafarella 發佈第一個 Hbase 程式碼。

 2007 年 4 月 : Yahoo! 在 1000 個節點叢集上執行 Hadoop。

 2008 年 1 月 : Hadoop 成為 Apache 頂層專案。

<<toBalaNotes "hadoop">>




///%hadoop
//%/

///%1
//%/

///%2
//%/
Linux KVM 魔法書這套系統, 要感謝我的學生與同好這一年熱心支持. 才得以順利推出. Linux KVM 魔法書主要是提供 ''IT 資訊專家'' 及 ''資料科學家'', 一個可擴展及可客製化的資訊實驗平台.

利用此資訊實驗平台, 使用者只需製定 XML 系統架構檔, Linux KVM 魔法書便會為你建立所需的實驗系統. 目前實作成功的系統有 1.企業靜態路由網路系統   2.企業網路服務 (Apache, Samba,..)系統  3.海量分散檔案系統 (Hadoop)  4. IPv6 企業網路系統等.  成功實作的系統架構圖, 如下 :

{{item1{企業靜態路由網路系統}}}

[img[img/Lab/Lab101.png]]
<<toBalaNotes "1">>
{{item1{企業網路服務系統  (DNS Server, WINS Server, DHCP server,..)}}}

[img[img/Lab/Lab201.png]]
<<toBalaNotes "2">>
{{item1{海量分散檔案系統 (Hadoop)}}}

[img[img/Lab/Lab301.png]]
<<toBalaNotes "3">>
{{item1{IPv6 企業網路系統}}}

[img[img/Lab/Lab6010.png]]
<<toBalaNotes "4">>

///%1
//%/

///%2
//%/

///%3
//%/

///%4
//%/
''參考文章''
1. virsh 操作文件
http://www.redhat.com/docs/en-US/Red_Hat_Enterprise_Linux/5/html/Virtualization_Guide/chap-Virtualization-Managing_guests_with_virsh.html
2. Ubuntu/KVM
https://help.ubuntu.com/community/KVM/Virsh
3. Getting started with LXC using libvirt
http://berrange.com/posts/2011/09/27/getting-started-with-lxc-using-libvirt/

{{item1{認識 virsh 命令}}}
''virsh'' is a program which uses libvirt to manage KVM virtual machines (guests). I’m going to show a summary of its main functions (cheat sheet style):

''Show guests’ information:''
{{{
* Show a list of all the defined guests: virsh list --all
* Show a guest’s info: virsh dominfo guest's_name
}}}

''Start and stop guests:''
{{{
* Start a guest: virsh start guest's_name
* Shutdown a guest (gently): virsh shutdown guest's_name
* Shutdown a guest (wild): virsh destroy guest's_name
* Suspend a guest: virsh suspend guest's_name
* Resume a suspended guest: virsh resume guest's_name
}}}

''Create and modify guests:''
{{{
* Create a new guest
* Create a guest from its XML definition: virsh create xml_file.xml
* Dump a guest’s definition in XML: virsh dumpxml guest's_name
* Modify a guest’s definition: virsh edit guest's_name
* Remove a guest’s definition (it doesn’t remove the image file): virsh undefine guest's_name
}}}

''Backup and restore guests:''
{{{
* Save a guest’s state on a file: virsh save guest's_name guest's_state_file
* Restore a guest from a state file: virsh restore guest's_state_file
}}}
<<toBalaNotes "1">>
{{item1{安裝 Libvirt}}}

''1. 安裝 libvirt-bin 套件''
{{{
$ sudo apt-get install libvirt-bin
正在讀取套件清單... 完成
正在重建相依關係          
正在讀取狀態資料... 完成
下列的額外套件將被安裝:
  cgroup-lite ebtables gawk libapparmor1 libnuma1 libsigsegv2 libvirt0
  libxenstore3.0 libxml2-utils
建議套件:
  radvd lvm2
下列【新】套件將會被安裝:
  cgroup-lite ebtables gawk libapparmor1 libnuma1 libsigsegv2 libvirt-bin
  libvirt0 libxenstore3.0 libxml2-utils
升級 0 個,新安裝 10 個,移除 0 個,有 0 個未被升級。
需要下載 2,785 kB 的套件檔。
此操作完成之後,會多佔用 9,482 kB 的磁碟空間。
是否繼續進行 [Y/n]?
}}}

''2. 重新啟動系統 (一定要做)''
{{{
$ sudo reboot 
}}}

{{item1{認識 Libvirt 管理平台架構}}}

''1. 檢視 Libvirt 元件版本資訊''

Ubuntu 10.04
{{{
$ virsh version
用此函式庫來編譯:libvir 0.7.5
使用函式庫:libvir 0.7.5
使用 API:QEMU 0.7.5
執行 hypervisor:QEMU 0.12.3
}}}

Ubuntu 12.04
{{{
$ virsh version
用此函式庫來編譯:libvir 0.9.8
使用函式庫:libvir 0.9.8
使用 API:QEMU 0.9.8
執行 hypervisor:QEMU 1.0.0
}}}

''2. 列出目前 "KVM 操作主機" 硬體規格''
{{{
$ virsh nodeinfo
處理器類型:   i686
處理器數目:   2
處理器的頻率: 2003 MHz
處理器的插槽: 1
每個插槽的核心: 2
每個核心的執行續: 1
NUMA cell:         1
記憶體大小:   4019588 kB
}}}

''3. Libvirtd 系統服務''
{{{
student@US1204:~$ ps aux | grep libvirtd
root      1273  0.3  0.2 265044  6108 ?        Sl   13:15   0:00 /usr/sbin/libvirtd -d
student   2345  0.0  0.0  13624   944 pts/0    S+   13:19   0:00 grep --color=auto libvirtd
}}}

{{item1{Libvirt 目錄結構}}}

''設定檔目錄 -  /etc/libvirt/ (擁有人是 root)''
{{{
$ tree -up /etc/libvirt/
/etc/libvirt/
├── [drwxr-xr-x root    ]  hooks
├── [-rw-r--r-- root    ]  libvirt.conf
├── [-rw-r--r-- root    ]  libvirtd.conf
├── [-rw-r--r-- root    ]  lxc.conf
├── [drwxr-xr-x root    ]  nwfilter
│ ├── [-rw-r--r-- root    ]  allow-arp.xml
│ ├── [-rw-r--r-- root    ]  allow-dhcp-server.xml
│ ├── [-rw-r--r-- root    ]  allow-dhcp.xml
│ ├── [-rw-r--r-- root    ]  allow-incoming-ipv4.xml
│ ├── [-rw-r--r-- root    ]  allow-ipv4.xml
│ ├── [-rw-r--r-- root    ]  clean-traffic.xml
│ ├── [-rw-r--r-- root    ]  no-arp-ip-spoofing.xml
│ ├── [-rw-r--r-- root    ]  no-arp-mac-spoofing.xml
│ ├── [-rw-r--r-- root    ]  no-arp-spoofing.xml
│ ├── [-rw-r--r-- root    ]  no-ip-multicast.xml
│ ├── [-rw-r--r-- root    ]  no-ip-spoofing.xml
│ ├── [-rw-r--r-- root    ]  no-mac-broadcast.xml
│ ├── [-rw-r--r-- root    ]  no-mac-spoofing.xml
│ ├── [-rw-r--r-- root    ]  no-other-l2-traffic.xml
│ ├── [-rw-r--r-- root    ]  no-other-rarp-traffic.xml
│ ├── [-rw-r--r-- root    ]  qemu-announce-self-rarp.xml
│ └── [-rw-r--r-- root    ]  qemu-announce-self.xml
├── [drwxr-xr-x root    ]  qemu
│ └── [drwxr-xr-x root    ]  networks
│     ├── [drwxr-xr-x root    ]  autostart
│     │ └── [lrwxrwxrwx root    ]  default.xml -> /etc/libvirt/qemu/networks/default.xml
│     └── [-rw-r--r-- root    ]  default.xml
└── [-rw-r--r-- root    ]  qemu.conf

5 directories, 23 files
}}}

''內定硬碟檔儲存目錄''
{{{
$ sudo tree /var/lib/libvirt/
/var/lib/libvirt/
├── boot
├── dnsmasq
│ └── default.leases
├── images                            # 硬碟檔儲存目錄
│ ├── W2K8.img
│ ├── Win7.img
│ └── WS2K3.img
├── libxl
│ └── save
├── lxc
├── network
│ ├── default.xml
│ ├── HUB100.xml
│ ├── HUB660.xml
│ ├── HUB661.xml
│ └── HUB88.xml
├── qemu
│ ├── dump
│ ├── GW100.monitor
│ ├── save
│ └── snapshot
└── uml
}}}

''虛擬電腦運作 Log 檔儲存目錄''
{{{
$ sudo tree /var/log/libvirt/
/var/log/libvirt/
├── libvirtd.log
├── libxl
│ └── libxl.log
├── lxc
├── qemu
│ ├── GW100.log
│ ├── R660.log
│ ├── R88.log
│ ├── TC660.log
│ ├── TC88.log
│ ├── W2K8.log
│ ├── Win2k.log
│ ├── Win7.log
│ ├── Win98.log
│ └── WS2K3.log
└── uml
}}}

<<toBalaNotes "2">>



///%1
//%/

///%2
//%/
1. MongoDB 文檔閱讀筆記
http://www.kui.name/blog/2011/mongodb-%E6%96%87%E6%AA%94%E9%96%B1%E8%AE%80%E7%AD%86%E8%A8%98.html
2. 淺談 NoSQL - Mongo DB
http://blog.roodo.com/develop/archives/cat_830317.html
3. Sleepy.Mongoose: A MongoDB REST Interface
http://www.snailinaturtleneck.com/blog/2010/02/22/sleepy-mongoose-a-mongodb-rest-interface/

4. Java Tutorial - MongoDB (必讀)
http://www.mongodb.org/display/DOCS/Java+Tutorial
官方網站:http://www.tiddlywiki.com/

|Jeremy Ruston[img[img/Jeremy.jpg]] |<< @@font-size:16pt;color:#00f;line-height:25pt;~TiddlyWiki is a single-file, self-contained wiki for managing micro-content, written in ~JavaScript.@@|
|borderless|k

{{item1{It's a single file}}}
<nowiki>A complete TiddlyWiki is stored in a single file on your computer, and thus belongs to the class of Single Page Applications. That makes it super-easy to move your TiddlyWiki around on a USB stick or by emailing it to yourself. 
</nowiki>

{{item1{It's self-contained}}}
<nowiki>The single file that is a TiddlyWiki contains not only all of your data, but all the machinery to edit and manipulate it. All you need is a modern web browser, like Mozilla Firefox or Microsoft Internet Explorer. You don't need to have any other special programs installed on your computer, and you don't need to be connected to the Internet. The same TiddlyWiki file will work on just about any computer: Windows PCs, Apple Macs, Linux and BSD boxes. 
</nowiki>

{{item1{It's a wiki}}}
<nowiki>A wiki is a collection of web pages, like a normal web site, except that every page can be edited, easily and immediately. Wiki systems are often used to collaboratively manage documentation for large projects, and also sometimes used by a single user as a kind of personal notebook. 
</nowiki>

{{item1{It manages micro-content}}}
<nowiki>Most web sites and wikis manage information as pages. TiddlyWiki is different -- it saves your stuff in smaller chunks (each chunk is called a 'tiddler'). Information in small chunks like this is called 'micro-content', and once you start using TiddlyWiki, you realise that micro-content is a natural fit to a lot of the stuff you deal with every day. 
</nowiki>

<<toBalaNotes "TiddlyWiki">>


///%TiddlyWiki
//%/
Tiny Core 是由 Robert Shingledecker 所設計, Robert 也是 Damn Small Linux (DSL) 創始人. Tiny Core 大小雖然只有 40MB, 所使用的 Linux 核心卻是 2.6.33, 在應用上提供將近上百個套件, 可使你根據需求, 立即創作出你自己的 Linux 發行套件.

Tiny Core Linux 發行套件, 2011 年在 [[DistroWatch Linux|http://distrowatch.com/]] 網站一直排名在前三十名

Tiny Core Linux 網址 : http://distro.ibiblio.org/tinycorelinux/welcome.html

[[1. Tiny Core 微型系統安裝 - Micro Core |http://linuxkvm.blogspot.com/2011/07/tiny-core.html]]

[[2. Tiny Core 系統運作 (Frugal 模式) |http://linuxkvm.blogspot.com/2011/07/tiny-core-frugal.html]]

<<toBalaNotes "1">>


///%1
//%/
''參考文章''
1. Anatomy of the libvirt virtualization library
http://www.ibm.com/developerworks/linux/library/l-libvirt/index.html
2. Virtio: An I/O virtualization framework for Linux
http://www.ibm.com/developerworks/linux/library/l-virtio/index.html
3. Linux container tools (簡易虛擬技術)
http://www.ibm.com/developerworks/linux/library/l-lxc-containers/index.html
4. Windows VirtIO Drivers (有提供 Windows 7)
http://www.linux-kvm.org/page/WindowsGuestDrivers/Download_Drivers
5. Ubuntu libvirt 官方文件
http://doc.ubuntu.com/ubuntu/serverguide/C/libvirt.html
6. Virtio install Windows 7 KVM (x64/x86) on Ubuntu 10.04.1 Server via DNJL PPA
http://bderzhavets.wordpress.com/2011/01/20/virtio-install-windows-7-kvm-x64x86-on-ubuntu-10-04-1-server-via-dnjl-ppa/
7. Windows VirtIO Drivers
http://www.linux-kvm.org/page/WindowsGuestDrivers/Download_Drivers
8. Latest Release of Windows Virtio Network Drivers
http://www.linux-kvm.com/content/latest-release-windows-virtio-network-drivers
9. Redhat 5.4 Windows Virtio Drivers Part 2: Block Drivers
http://www.linux-kvm.com/content/redhat-54-windows-virtio-drivers-part-2-block-drivers
10. Libvirt Virtual Networking (很重要)
http://wiki.libvirt.org/page/VirtualNetworking 

{{item1{libvirt (由 RedHat 維護)}}}
libvirt 官方網站 : http://libvirt.org/

''libvirt is an open source API and management tool for managing platform virtualization''. It is used to manage Linux KVM and Xen virtual machines through graphical interfaces such as ''Virtual Machine Manager'' and higher level tools such as ''oVirt''. Note that libvirt does not yet support some important virtualization features like snapshots.

''libvirt itself is a C library'', but it has bindings in other languages, notably in Python and Perl.

''Development of libvirt is backed by Red Hat'', but it is available on most Linux distributions; remote servers are also accessible from Microsoft Windows clients.

{{op1{libvirt 本機運作架構 (目前沒支援 MS Hyper-V)}}}

[img[img/kvm/libvirt01.gif]]

''[重點]''
Also shown is a comparison of the terminology that libvirt uses. This terminology is important, as these terms are used in API naming. The two fundamental differences are that libvirt calls the ''physical host'' a ''node'', and the ''guest operating system'' is called a ''domain''. Note here that libvirt (and its application) runs in the domain of the ''host Linux operating system (domain 0)''. 

''libvirt 遠端運作架構''

[img[img/kvm/libvirt02.gif]]

''檢視 libvirt 版本''
{{{
# libvirtd --version
libvirtd (libvirt) 0.7.5
}}}

<<toBalaNotes "1">>
{{item1{virtio}}}
''Virtio'' is a Linux standard for ''network'' and ''disk'' device drivers where just the guest's device driver "knows" it is running in a virtual environment, and cooperates with the hypervisor. This enables guests to get high performance network and disk operations, and gives most of the performance benefits of paravirtualization.

''[Note]'' that virtio is different, but architecturally similar to, Xen paravirtualized device drivers (such as the ones that you can install in a Windows guest to make it go faster under Xen). ''Also similar is VMWare's Guest Tools''. 

''virtio 運作架構''

[img[img/kvm/virtio01.gif]]

Note that in reality (though not required), the device emulation occurs in user space using QEMU, so the back-end drivers communicate into the user space of the hypervisor to facilitate I/O through QEMU. QEMU is a system emulator that, in addition to providing a guest operating system virtualization platform, provides emulation of an entire system (PCI host controller, disk, network, video hardware, USB controller, and other hardware elements).

The virtio API relies on a simple buffer abstraction to encapsulate the command and data needs of the guest. Let's look at the internals of the virtio API and its components.

''virtio 細部運作架構''

[img[img/kvm/virtio02.gif]]

In addition to the front-end drivers (implemented in the guest operating system) and the back-end drivers (implemented in the hypervisor), virtio defines two layers to support guest-to-hypervisor communication. At the top level (called virtio) is the virtual queue interface that conceptually attaches front-end drivers to back-end drivers. Drivers can use zero or more queues, depending on their need. For example, the virtio network driver uses two virtual queues (one for receive and one for transmit), where the virtio block driver uses only one. Virtual queues, being virtual, are actually implemented as rings to traverse the guest-to-hypervisor transition. But this could be implemented any way, as long as both the guest and hypervisor implement it in the same way. 

{{op1{Guest OS : Ubuntu Linux 核心已內建 Virtio 驅動程式}}}
{{{
$ sudo find /lib/modules/2.6.32-32-generic-pae/ -name virtio*
/lib/modules/2.6.32-32-generic-pae/kernel/drivers/virtio/virtio_balloon.ko
/lib/modules/2.6.32-32-generic-pae/kernel/drivers/char/hw_random/virtio-rng.ko
/lib/modules/2.6.32-32-generic-pae/kernel/drivers/char/virtio_console.ko

$ sudo find /lib/modules/2.6.32-27-generic-pae/ -name virtio*
/lib/modules/2.6.32-27-generic/kernel/drivers/virtio/virtio_balloon.ko
/lib/modules/2.6.32-27-generic/kernel/drivers/virtio/virtio.ko
/lib/modules/2.6.32-27-generic/kernel/drivers/virtio/virtio_pci.ko
/lib/modules/2.6.32-27-generic/kernel/drivers/virtio/virtio_ring.ko
/lib/modules/2.6.32-27-generic/kernel/drivers/char/hw_random/virtio-rng.ko
/lib/modules/2.6.32-27-generic/kernel/drivers/char/virtio_console.ko
/lib/modules/2.6.32-27-generic/kernel/drivers/block/virtio_blk.ko
/lib/modules/2.6.32-27-generic/kernel/drivers/net/virtio_net.ko

$ sudo find /lib/modules/2.6.31-14-generic-pae/ -name virtio*
/lib/modules/2.6.31-14-generic/kernel/drivers/virtio/virtio_balloon.ko
/lib/modules/2.6.31-14-generic/kernel/drivers/virtio/virtio.ko
/lib/modules/2.6.31-14-generic/kernel/drivers/virtio/virtio_pci.ko
/lib/modules/2.6.31-14-generic/kernel/drivers/virtio/virtio_ring.ko
/lib/modules/2.6.31-14-generic/kernel/drivers/char/hw_random/virtio-rng.ko
/lib/modules/2.6.31-14-generic/kernel/drivers/char/virtio_console.ko
/lib/modules/2.6.31-14-generic/kernel/drivers/block/virtio_blk.ko
/lib/modules/2.6.31-14-generic/kernel/drivers/net/virtio_net.ko
}}}

''Virtio 設計者 : Rusty Russell''

<html><img src="img/Rusty Russell.jpg" width="30%" height="30%"/></html>

@@font-size:14pt;
Russell wrote the packet filtering systems ''ipchains'' and ''netfilter/iptables'' in the Linux operating system kernel. 

''Linus Torvalds (Linux 大師)'' has referred to him as one of his "top deputies (主要代理人)"

Rusty Russell Wikipedia : http://en.wikipedia.org/wiki/Rusty_Russell
@@
<<toBalaNotes "kvm">>


///%kvm
//%/

///%1
//%/
''參考網站''
1.IBM Service Science, Management and Engineering (SSME)
http://www.ibm.com/ibm/ideasfromibm/us/compsci/20080728/index.shtml

本文網址 : http://www.itmag.org.tw/magazine/article_single_470.htm

''服務產業'' 已經成為所有先進國家之重要經濟活動,以美國來說,2005 年的 GDP,就有七成五來自服務業;而日、德、英也都有七成左右的比例。我國 2005 年的 GDP 有高達73.6% 的比例來自服務業,並有將近六成的人在服務業工作。更令人吃驚的是,發展中國家,如中國和印度,雖然目前服務業佔 GDP 比例僅有三成左右,然而卻以遠超越美國的高速度成長,尤其是中國大陸,服務業比例成長的速度是美國的九倍以上。

目前全球服務業正在以非常驚人速度成長,製造業裡面也發生了重要的 ''服務業革命''。以美國 IBM 和 GE 為首的私人企業,已轉型成為服務導向型企業,公司獲利來源皆有將近五成來自服務的營收。其中又以 IBM 的例子最為人所熟知,在 IBM 將 PC 部門整個賣給中國聯想(Lenovo)之後,可說正式轉型為服務導向型的製造商,而這樣的轉型過程,可以三個歷程來說明:

一、製造與生產時期:製造業原本的主業,是專注於製造與生產,並將銷售部份委託通路來進行。
二、加值服務時期:隨著高科技產品越來越複雜,製造業者所必須提供的售後維護服務與售前顧問服務也越來越多;因此維修服務、使用指導服務、相關資訊提供服務、社群維護服務等快速興起。
三、轉型服務時期:為了協助客戶有效運用自身所製造的產品,製造業者提供更豐富的顧問服務,甚至規劃出包含競爭對手產品的完整解決方案,來協助客戶導入;久而久之,客戶認可該製造業所提供的服務,比所生產的商品更具價值,於是該製造商轉型為服務導向型製造商。

由於服務經濟的快速興起,美國認知光是在工業科學上的進步已經不足以確保美國的長治久安,唯有積極發展和服務相關的科學,才能因應未來的變局。於是在 2004 年 12 月,美國競爭力委員會發表 Palmisano Report,指出美國須從三個面向(人力資源、投資及基礎建設)強化服務業創新,報告中並指出,美國面臨全球化競爭威脅、科學及技術研究趨緩以及技術移轉到製造領域的應用延遲等問題;由於服務業為美國主要經濟來源,推斷美國在服務領域上缺乏創新商業流程設計、組織及管理的研究投資。在這樣的背景下,服務科學取代傳統的工業技術研究,成為美國重點的研究方向,也帶動了全世界研究服務科學的熱潮。

何謂服務科學?服務科學,簡單的說,便是以科學方法發展可以提高服務業的生產力與創新力的方法。''IBM 以 SSME 稱之,也就是服務科學(Services Science)、管理(Management)與工程(Engineering)的縮寫,一般以服務科學(Services Science)簡稱之''。

整個人類科學研究重心的變遷,可以分為三個時期 :''十九世紀的機械系統時期''、''二十世紀的資訊系統時期'',與''二十一世紀的服務系統時期''。在機械系統時期,科學研究的重心,是如何將材料與動力轉變為價值輸出,其中經典的範例便是蒸汽火車;毋庸置疑的,蒸汽火車大幅改變了人類的生活形態與經濟形態,美國的經濟大幅起飛,便是拜橫跨東西岸的火車運輸系統之賜。然而到了二十世紀,另一個重要的科學發明:''電腦與網路'',成就了更高的經濟成長率,其中代表性應用便是搜尋引擎,它將資訊本身轉變為價值輸出,大幅改善了訊息傳遞的效率。到了二十一世紀,美國的專家學者指出,''服務系統的變革'',將是經濟成長的另一個里程碑!以境外客服中心為例,一個結合了人類技能與通訊科技的產品,大幅降低了客服中心的維運成本。這類的服務系統,可將包含人、技術、組織與科技的混合體,以創新的方式遞送服務到遠端,無遠弗屆。

在美國,研究服務科學的機構包含了許多優質大學,例如加州柏克萊大學(UC Berkeley: http://ssme.berkeley.edu/)已經開始提供 SSME 課程,而其他如卡內基美隆(Carnegie Mellon University)、麻省理工(Massachusetts Institute of Technology)、賓州州立大學(Penn State University)等近十所名校也有相關研究正在進行。其他國家如德國有 Fraunhofer 學院(Fraunhofer Institute for Industrial Engineering)專注在服務科學研究,英國有克萊菲爾德商學院(Cranfield School of Management)的服務管理中心,日本有先進科學與技術大學(JAIST)開了服務科學課程等。連中國哈爾濱大學在服務工程語言 USML 的發展上,都讓世界驚豔;而 台灣今年 (2007) 開始,才有清華大學和政治大學加入這方面的研究,怎不叫人捏把冷汗。

服務科學的內涵包含了很多必須深入研究的議題,尤其是在資訊通訊科技快速發展的背景下,服務的遞送已經不只是靠人便可以完成,科技的引進造就了所謂的 Cyber-infrastructure,讓服務的體系成了科技與人文的混合體,而 Web 2.0 之類的創新線上服務模式,更讓服務定價、管理、評估和測試等議題成了新研究焦點。日本 IBM 的 Hidaka 博士條列了一些研究的議題供研究者參考,包含了:
{{{
- 創新服務研發與管理
- 提高服務效益的技術
- 服務的價格如何設定
- 服務的生產力之評估
- 促進服務品質及效益的方法論及工具
- 服務的測試
- 服務專案的風險控管
- 作業管理(OR)及全面最佳化
- 計算型組織理論 (Computational organization theory)
}}}

由於服務科學和國家經濟息息相關,政府角色的重要性便不言而喻。美國早在 2002 年開始,便有服務企業工程計畫(Service Enterprice Engineering,簡稱SEE)的推動,用政府資金獎勵學術界進行服務工程與科學的研究,而 IBM 自 2004 年起,更以業界龍頭的態勢,主導美國乃至全球 SSME 的發展。其他如歐盟成立軟體與服務推動計畫(Networked European Software and Services Initiative,簡稱 NESSI),在未來七年投入480億歐元進行跨學科的服務創新研究;中國大陸的十一五計畫(第十一個五年計畫)將從 2006 年到 2010 年推動現代服務業的發展,中國清華大學與北京大學並將與IBM合作發展服務科學與工程。日本內閣亦通過 2006-2010 年科技政策,明確表述需要科學方法以實現創新服務,並由經產省成立服務創新研究組,探究跨領域學科的合作與推動。政府角色和服務科學進展間的關連,可見一斑;反觀台灣,服務科學的思惟尚在啟蒙階段,如何加速國人對服務科學的認知與了解,恐怕是當務之急。

號稱晶圓代工王國的台灣,目前正面臨開發中國家(尤其是中國和印度)的挑戰,以及產業外移的衝擊,台灣經濟如何走出陰霾?在『製造經濟』轉型為『服務經濟』的趨勢下,如何解決服務經濟下所衍生出各領域發展服務的相關難題?如何提升創新服務的能力?加強差異化競爭力?服務科學或許是解答。

<<toBalaNotes "ssme">>


///%ssme
//%/
桌面雲 (Desktop Cloud) 是 IBM 在 2008 年喊出的新名詞,其概念類似於傳統的精簡用戶端 (Thin Client) 電腦,但更強調與資料中心伺服器間的整合,以及相關虛擬化應用軟體派送 (application delivery)、資訊安全控管等的整合設計。

桌面雲的主要好處,便是能夠有效節省企業在 IT 系統管理的成本支出。舉例來說,傳統 PC 環境下,個別 PC 都需要安裝好所有業務所需的軟體,造成 IT 人員的管理負擔,而許多公司資訊也會在此架構下,儲存在員工的桌面電腦上,增加資訊外洩的風險,然透過應用軟體統一由資料中心伺服器派送的模式,則可避免上述缺點。

桌面雲傳統作法, 就是 ''Terminal Server'', 思杰(Citrix) 是這技術的第一品牌, 但 ''思杰(Citrix)'' 並未採用 桌面雲一詞,可是也同樣將此領域劃入雲端運算發展的重點藍圖之一。

''企業導入雲端-我們的第一個建議 : 桌面雲''

雲是飄過來了,但有多種,銀行該如何導入雲端運算呢?我們的建議是從私有雲開始,並選擇最具投資效益的桌面雲為起點。

目前銀行人手一台的個人電腦其實是個龐大的支出,以某個百餘家分行的銀行為例,單是汰舊換新、運作維護與電力消耗,每年就高達新台幣四至五千萬元,若將全部個人電腦分七年換成桌面雲,其資訊投資的內部報酬率 (IRR),在不同組合下約在 11% 至 32% 之間,還附帶無價的資訊安全,以及避免員工用個人電腦做雜事等外部效應。

<<toBalaNotes "1">>

<html><img src="img/cloudvm.png" width="120%" height="120%"/></html>
<html><img src="img/cloudvm01.png" width="120%" height="120%"/></html>
<html><img src="img/cloudvm02.png" width="120%" height="120%"/></html>

///%1
//%/
''參考網站''
1. IBM Mote Runner
http://www.zurich.ibm.com/moterunner/
2. IBM Mote Runner SDK
http://www.alphaworks.ibm.com/tech/moterunner
3. Mote Runner Hardware
http://www.memsic.com/products/wireless-sensor-networks/wireless-modules.html
4. IBM 院士王雲談物聯網發展趨勢 
http://www.ithome.com.tw/itadm/article.php?c=63845
5. 國立高雄第一科技大學舉辦物聯網 IOT 研討會
http://www.flag.com.tw/school/iot/2010q4/iot2010_nkfust_sign.html


[img[img/InternetOfThings.png]]
本文網址 : http://zh.wikipedia.org/zh-tw/%E7%89%A9%E8%81%94%E7%BD%91

物聯網就是把感測器裝備到電網、鐵路、橋樑、隧道、公路、建築、供水系統、大壩、油氣管道以及家用電器等各種真實物體上,通過網際網路聯接起來,進而運行特定的程序,達到遠程控制或者實現物與物的直接通信。物聯網,即通過裝置在各類物體上的射頻識別(RFID),感測器、二維碼等,經過介面與無線網路相連,從而給物體賦予「智能」,實現人與物體的溝通和對話,也可以實現物體與物體互相間的溝通和對話,這種將物體聯接起來的網路被稱為「物聯網」。

物聯網一般為無線網,由於每個人周圍的設備可以達到一千至五千個,所以物聯網可能要包含500億至一千萬億個物體,在物聯網上,每個人都可以應用電子標籤將真實的物體上網聯結,從一瓶酸奶到一架飛機,在物聯網上都可以查找出它們的具體位置,即使在地球的另一邊。

想想看,每人每天要用到的物品有多少? 鑰匙、汽車、手機、音響、冷氣機、眼鏡、檯燈、冰箱、咖啡機、洗衣機等等,少則一兩百項,多則上千項,如果這些物品彼此都能建立連線,會形成多麼大的網路!美國研究機構 FORRESTER 預測,物聯網的業務量將是互聯網的 30 倍,產值高達新台幣 10 兆元!

從美國總統歐巴馬將「智慧地球」提升為國家發展策略之後;中國總理溫家寶也緊接著提出「感知中國」。「智慧地球」與「感知中國」的核心工程,其實都是「物聯網」的範疇。

中國政府更將物聯網納入「十二五」計畫(第 12 個 5 年計畫),將其定為戰略性新興產業。中國產業界和學術界都將物聯網視為下一代的技術革命,全力掌握核心技術。

{{item1{物聯網應用}}}
''2010 IDEAS Show 網路創意展''
Lover's Cup Live!讓你透過一個可以上網的抱枕,感測你的心跳,讓遠在天邊的男女朋友可以彼此感知對方的心(真甜蜜); DroidPaPa則是讓你用遙控器調整手機的靜音設定,讓你找出因為不小心調成靜音又搞不見了的手機( 如果遙控器不見的話又該怎麼辦)。這兩個服務共通的特點是,他們讓網路溢出我們熟悉的電腦或手機,更進一步染指了抱枕和遙控器。 

{{item1{IBM launches software to spur sensor management, 'Internet of things'}}}
本文網址 : http://www.zdnet.com/blog/btl/ibm-launches-software-to-spur-sensor-management-internet-of-things/35409

IBM on Monday will roll out a software development kit for an application dubbed Mote Runner with the aim of spurring the adoption of sensors in various devices, products and systems. The real goal is to enable the so-called Internet of things by making sensor networks easier to deploy and manage.

Mote Runner is a free download. IBM will make the announcement at the 2010 Sensors Expo & Conference. The Mote Runner moniker refers to Mote’s—or wireless sensor nodes—that gather information ranging from temperature, movement and light and refer back to a network.

[img[img/moterunner.jpg]]

Meanwhile, MEMSIC, which makes these micro sensor systems, will include Mote Runner on one of its IRIS sensor. The MEMSIC IRIS is a 2.4 GHz wireless sensor mote (right) used for enabling low-power wireless sensor networks in buildings or traffic patterns at an intersection.

The moves come as the Internet of things has become a focal point of more businesses. For instance, telecom companies see enabling machines with wireless access as a growth market. These machine to machine connections may fuel future growth for a wireless industry that is nearing saturation in the U.S.

Toss in smart grids, appliances and homes and we’re headed toward many interconnected devices carrying embedded software. IBM said it’s moving to release Mote Runner as governments and companies are taking advantage of low-cost sensors to monitor buildings, business systems and other networks in a bid to garner real-time data that will help decision making.

How would this work? IBM said Mote Runner could be used by a building management company to monitor and analyze sensors in a high-rise. The company could then develop apps for the sensors, monitor equipment and data and reprogram them remotely as needed. These sensors could also be used to monitor the elderly in their homes. Agriculture, climate monitoring, health care and water systems could also make similar use of Mote Runner and a sensor network.

Among the key points about Mote Runner:

* The software was created by IBM Research.
* It’s a low-footprint platform that can link standard languages like Java with sensor virtual machines.
* Uses a simulation environment and Web dashboard based on Eclipse.
* Mote Runner is designed to be a low power app running on an 8-bit processor, 8 KB of RAM and 64 KB of flash memory. That’s the equivalent of what a PC operating system needed in the 1970s.

Here’s a look at Mote Runner’s dashboard:

[img[img/MoteRunnerdashboard.jpg]]

<<toBalaNotes "iot">>



///%iot
//%/
''參考文章''
1. 為什麼虛擬化是雲端運算的基礎?
http://vaemon.com/article/588.html
2. Virtualization - CPU
http://benjr.tw/?q=node/216
3. 改變 - 就從虛擬化開始 (VMware)
http://www.runpc.com.tw/content/content.aspx?id=104205
4. 從 VT-x  到 VT-d Intel 虚擬化技術發展藍圖
http://stenlyho.blogspot.com/2009/01/vt-xvt-d-intel.html
5. How to assign devices with ''VT-d in KVM''
http://www.linux-kvm.org/page/How_to_assign_devices_with_VT-d_in_KVM
6. 簡單了解Intel VT-d
http://vaemon.com/article/865.html
7. [SOLVED] kvm nic passthrough: link but no ping 
http://ubuntuforums.org/showthread.php?t=1441845

{{item1{過去的虛擬化技術}}}
本文網址 : http://benjr.tw/?q=node/216 (必讀)

和虛擬化技術最相關的就以 CPU 莫屬,不過在 x86 的 CPU 也不是一開始就支援虛擬化了,他是經過好幾代的改變,才由 CPU 去支援虛擬化的工作, ''過去常見的模式'' 可以區分成下面幾種.

[img[img/vmtype.png]]

''[註]'' VMM (Virtual Machine Manager)

其中最為人知的虛擬化軟體非屬 VMware ,而在 1999 的 2 月,VMware 推出他們第一款 x86 虛擬化的產品 "VMware Virtual Platform",不過在一開始 VMware 是透過純軟體的方式去模擬出一個最簡易的硬體環境.再讓 Guest OS 運作在虛擬的硬體上面.所以算是 ''完全虛擬化 (Full virtualization)'' 的虛擬化的方式,主要是透過 Binary translation 的方式去解決虛擬和真實的硬體彼此溝通的橋樑.不過也因為如此在效能上比起一些以 natively virtualizable architecture, 如 IBM System/370 or Motorola MC68020 較差.

所以另外一項技術 Xen 在最近幾年快速竄起,他提供了比 完全虛擬化 (Full virtualization) 較優的效能.因為他不是採用以虛擬出一個全新的硬體給 Guest OS,它採用的方式為 ''半虛擬化 (Para- Virtualization)'', 但前提是 Guest OS 的核心都需要修改, 這樣 Guest OS 就可以直接和硬體層溝通,所以除了大部分的 Linux 和 FreeBSD 可以做修改外.Windows 的系統是無法使用這項技術,因為 Windows 不願意去修改他們的核心也不願意給別人修改.但是這一項技術在 CPU 的硬體支援下, 終於不用再透過修改核心的方式去配合 Para- Virtualization 的運作方式, 而改由 CPU 去支援.這就是 Hardware-assisted virtualization  (Intel VT 和 AMD-V) ,這一項技術在 Xen 3.0 得到支援,不過這種以 CPU 為支援的方式在 Xen 下面也是被稱為 Full virtualization

''@@color:red;過去的專有名稱@@''
1. 半虛擬化 : 需修改系統核心
2. 完全虛擬化  : 需執行 CPU 指令轉碼

{{item1{硬體輔助虛擬化 (Hardware-assisted virtualization)}}}
Intel VT-x 與 AMD-V。這兩家 x86 處理器廠商對處理器硬體作了修改,當 Guest OS 核心系統發出特權指令時, 能夠自動被 hypervisor 截獲。所以在這種新機器上,Para-Virtualization 就沒有必要再對 Guest OS 內核作修改,Full-Virtualization 也無必要對 Guest OS 做 CPU 指令轉碼。所以我們也可以說,如果不考慮 Para-Virtualization 與 Full-Virtualization 在 IO 設備處理上還有不同之處,那麼 CPU 硬體協助的虛擬化技術, 已經取消了前兩種虛擬化技術之間的差別:兩者都可以被看作是全虛擬化技術。

Intel 和 AMD 在對 X86 伺服器硬體協助的虛擬化技術上還做了如下重要工作:統一管理了平臺上輸入輸出設備對內存的直接訪問(Direct Memory Access, DMA)。這改變了以前機器上輸入輸出設備可以自由任意對內存進行直接訪問,這種“無政府主義”的危險狀態(非常危險!)。用硬體協助的虛擬化技術對 DMA 作統一管理,這對於x86 平臺伺服器虛擬化技術運用到雲計算, 加強雲計算安全方面有很重要的意義。前面我們提到硬體協助的虛擬化技術取消了 Para- Virtualization 與 Full-Virtualization 之間的差別,這樣的說法沒有考慮到兩者在 IO 設備管理上的不同之處。其實正是在對 IO 設備 DMA 的統一管理方面,Citrix Xen 或 MS Hyper-V 與 VMware ESX 有很不同的性質,在雲服務安全上有明顯的差異。

''@@color:red;有了 Hardware-assisted virtualization 技術, 各家虛擬技術均可稱為 Full-Virtualization, 它們的差別在週邊硬體 (網卡, 硬碟) 虛擬化支援@@''
 
''目前 Intel 推出含有 VT 的技術包括了下面幾項.''
1. VT-x 這是屬於 IA-32 和 Intel 64 架構的
2. VT-i 這是屬於 Itanium 架構的
3. VT-d refers to Intel VT for Directed I/O

''目前 Intel 推出的 CPU 並未包括下面這項,這是未來 Intel 將計畫推出的.''
1. [[VT-c refers to Intel VT for Connectivity (一定要看)|http://benjr.tw/node/596]]

''AMD virtualization (AMD-V)''
AMD 包含虛擬化的 CPU 在一開始的名稱為 Pacifica 應該也是為 Project code name.但是現在都被稱為 AMD Virtualization 就簡稱為 AMD-V

至於哪一些 AMD CPU 是包含 AMD-V 的技術,大概Athlon 64 和 Athlon 64 X2 這一系列 "F" 和 "G" stepping 的處理器以及Turion 64 X2, Opteron 2nd generation and 3rd-generation, Phenom 和 Phenom II processors 皆支援 AMD-V.但是像是低階的 Sempron 處理器除了 Sable 和 Huron 都不支援 AMD-V.

{{item1{虛擬平台架構}}}
''@@color:red;A. 寄居架構 (Hosted Architecture)@@''
其主要特性為將虛擬機器運行於 Host OS 上,形成 OS in OS 的結構,視為作業系統上一個應用程式。最大好處為硬體相容性高,只需要 Host OS 已安裝使用,Guest OS 便可加以利用。最主要問題為若 Host OS 出現問題,則所有虛擬機器將同時無法運作,且各虛擬機器之間無獨立的硬體資源空間。VMware Workstation、Microsoft Virtual PC 皆屬於此範疇。

[img[img/virtual03.jpg]]

''@@color:red;B. 裸機架構 (Bare-Metal)@@''
稱為 Hypervisor,接管所有硬體資源,並可將 Server 集中管理,任一 Guest OS 運作異常皆不會影響其他 Guest OS。最大問題為硬體相容性,並支援多種 Server 級儲存設備,如 iSCSI SAN、FC SAN 等。代表性的原生架構 Hypervisor 有 VMware ESX Server,  Microsoft Hyper-V Server 與 Linux KVM。

[img[img/virtual04.png]]

<<toBalaNotes "1">>
{{item1{摩爾定律}}}
@@font-size:24px;line-height:30px;color:red;
應如何有效使用倍數成長的高效能電腦 ?  
虛擬化是一個絕佳的解決方案
@@

摩爾定律是由英特爾(Intel)創始人之一戈登·摩爾(Gordon Moore)提出來的。其內容為:集成電路(IC)上可容納的電晶體數目,''約每隔 18 個月便會增加一倍,性能也將提升一倍'',當價格不變時;或者說,每一美元所能買到的電腦性能,將每隔 18 個月翻兩倍以上。這一定律揭示了資訊科技進步的速度。

1965 年4月19日,《電子學》雜誌(Electronics Magazine)第114頁發表了摩爾(時任仙童半導體公司工程師)撰寫的文章〈讓集成電路填滿更多的元件〉,文中預言半導體晶片上整合的電晶體和電阻數量將每年增加一倍。

1975 年,摩爾在 IEEE 的一次學術年會上提交了一篇論文,根據當時的實際情況對摩爾定律進行了修正,把「每年增加一倍」改為「每兩年增加一倍」,而現在普遍流行的說法是「每 18 個月增加一倍」。但1997年9月,摩爾在接受一次採訪時宣告,他從來沒有說過「每 18 個月增加一倍」。

大抵而言,若在相同面積的晶圓下生產同樣規格的 IC,隨着製程技術的進步,每隔一年半,IC 產出量就可增加一倍,換算為成本,即每隔一年半成本可降低五成,平均每年成本可降低三成多。就摩爾定律延伸,IC 技術每隔一年半推進一個世代。

摩爾定律是簡單評估半導體技術進展的經驗法則,其重要的意義在於長期而言,IC 製程技術是以一直線的方式向前推展,使得 IC 產品能持續降低成本,提升性能,增加功能。

台積電董事長張忠謀曾表示,摩爾定律在過去 30 年相當有效,未來 10~15 年應依然適用。但最新的一項研究發現,"摩爾定律"的時代快會結束。因為建造研究和實驗室的成本需求十分高,而有財力投資建立和維護晶片工廠的企業很少。

{{item1{利用虛擬化改善您的投資報酬率 (Return on Investment, ROI)}}}
本文網址 : http://www.vmware.com/tw/overview/

基本上,虛擬化會透過在多個環境中共用單一電腦的資源,讓一部電腦完成多部電腦的工作。''虛擬伺服器''和''虛擬桌面平台''可讓您從本機和遠端位置代管多種作業系統和多種應用程式,進而突破實體和地理位置的限制。當您建立虛擬基礎架構之後,除了提高硬體資源的 ''使用效率'' 而 ''節省能源'' 並 ''降低資本支出'' 以外,您還可以提升 ''資源的可用性''、''有效管理桌面平台''、''增加安全性'',以及 ''改善災難復原程序''。

''何謂虛擬化?''
虛擬化是一項經證實正在快速轉變 IT 世界版圖,以及從根本上改變電腦運算方式的軟體技術。

現今功能強大的 x86 電腦硬體原本是設計成只能執行單一作業系統和單一應用程式,不過虛擬化打破了這條陳規,讓您能在同一部電腦上同時執行多種作業系統和多種應用程式,進而提升硬體的使用率和彈性。

虛擬化是一項可讓任何電腦使用者獲益的技術,從 IT 專業人員 和 進階使用者 到 商務企業 和 政府組織 都包含在內。現在加入全球數百萬虛擬化使用者的行列,即可節省時間、成本和能源,同時發揮現有電腦硬體的最佳效能。

''虛擬化的發展史''
IBM 在 30 年以前首次實作了虛擬化技術,藉以將主機電腦邏輯分割成不同的虛擬機。這些磁碟分割讓主機具有「多工架構」:可同時執行多種應用程式和程序。因為主機在當時屬於昂貴的資源,所以它們會設計成分割運作,以便充分運用投資成本。

''x86 虛擬化的需求''
實際上,虛擬化技術在 1980 和 1990 年代處於棄而不用的狀態,因為當時用戶端伺服器應用程式以及成本低廉的 x86 伺服器和桌面平台確立了分散式運算的模型。組織利用低成本的分散式系統來建立孤立的運算資源,而非在主機模型內集中共用資源。在 1990 年代廣泛採用的 Windows 和異軍突起的 Linux 伺服器作業系統確立了 x86 伺服器成為業界標準。隨著 x86 伺服器和桌面平台部署的成長,卻產生了新的 IT 基礎架構和營運挑戰。這些挑戰包括:

1. 基礎架構的使用率過低。根據市場研究公司 International Data Corporation (IDC) 的報告指出,一般 x86 伺服器部署的平均使用率僅達總容量的 10% 至 15%。通常,組織會在每部伺服器上執行單一應用程式,藉此避免同一部伺服器上某個應用程式弱點影響其他應用程式可用性的風險。

2. 實體基礎架構的成本增加。支援成長中實體基礎架構的營運成本不斷穩定地增加。大部分運算基礎架構都必須隨時保持運作狀態,因而導致電源耗用量、散熱和設施成本無法隨著使用率等級變化。

3. IT 管理的成本增加。隨著運算環境日趨複雜,基礎架構管理人員所需的專業教育和經驗等級以及這類人員的相關成本便增加。組織將花費不成比例的時間和資源來處理與伺服器維護相關的手動工作,因此會需要更多人員來完成這些工作。

4. 容錯轉移和災難保護機制不足。組織將持續不斷地受到關鍵伺服器應用程式的停機時間和關鍵使用者桌面平台的故障所影響。安全性攻擊、自然災害、流行病毒和恐怖份子的威脅已經提升了針對桌面平台和伺服器規劃業務續航力的重要性。

5. 一般使用者桌面平台的維護作業增加。在管理和保護企業桌面平台安全方面,產生了許多挑戰。在不影響使用者工作效率的情況下,控制分散式桌面平台環境以及執行管理、存取和安全性原則,確實是一項複雜且成本高昂的作業。此外,您必須持續將許多修補程式和升級檔套用至桌面平台環境,才能排除安全性弱點。

''採用虛擬化軟體的 5 大理由''
1. 伺服器整合和基礎架構最佳化:虛擬化會透過共用一般基礎架構資源並打破傳統「一個應用程式對一部伺服器」的模型,讓您達到大幅提升資源使用率的效果。

2. 降低實體基礎架構的成本:您可以利用虛擬化來降低資料中心內伺服器與相關 IT 硬體的數目。如此一來,便降低機房空間、電源和散熱的需求,因而大幅減少 IT 成本。

3. 改善營運彈性與回應速度:虛擬化提供了一種管理 IT 基礎架構的全新方式,可協助 IT 管理員花費更少的時間處理重複的工作,例如佈建、設定、監視和維護。

4. 提升應用程式可用性和改善業務續航力:透過在不中斷服務的情況下進行安全備份以及移轉整個虛擬環境的功能,您可以排除計畫中的停機時間並快速地從意外停機事件中恢復。

5. 改善桌面平台管理能力與安全性:不論有沒有網路連線,您幾乎可以在任何標準桌面平台、筆記型電腦或 Tablet PC 上,從本機或遠端位置部署、管理並監視一般使用者可存取的安全桌面平台環境。

''何謂虛擬基礎架構?''
基本上,虛擬基礎架構就是實體資源與業務需求的動態對應。虛擬機代表單一電腦的實體資源,而虛擬基礎架構則代表整個 IT 環境的實體資源,並將 x86 電腦及其連接的網路和儲存設備彙總成統一的 IT 資源集區。

在結構上,虛擬基礎架構是由下列元件所組成:
* 單一節點的虛擬管理程式,可讓每部 x86 電腦完全虛擬化。
* 包括像是資源管理的一組虛擬化分散式基礎架構服務,可最佳化虛擬機之間的資源。
* 提供特殊功能的自動化解決方案,可最佳化像是佈建或災難復原等特定 IT 程序。
透過將整個軟體環境與基礎硬體基礎架構分離,虛擬化可以讓多個伺服器、儲存基礎架構和網路彙總成共用的資源集區,以便視需要以動態、安全且可靠的方式提供給應用程式。這項領先業界的方法讓組織能夠使用成本低廉的業界標準伺服器基本元件,打造高階使用率、可用性、自動化和彈性的運算基礎架構。

<<toBalaNotes "vmstd">>


///%1
//%/

///%vmstd
//%/
''參考文章''
1. Anatomy of an open source cloud
http://www.ibm.com/developerworks/opensource/library/os-cloud-anatomy/
2. Run your own Ubuntu Enterprise Cloud, part 3
http://fnords.wordpress.com/2009/10/13/run-your-own-uec-part-3/
3. Open vSwitch
http://openvswitch.org/ 

{{{
一位 open source guru 說:"Private Cloud is not true Cloud." 他的論點是著眼於企業 IT 的 Private Cloud 缺乏 Public Cloud 資源無所懼 (unlimited resource) 的特性,所以 Private Cloud 通常只是進行「虛擬自動化」(Virtualization Automation),服務的擴充性 (Scalability) 會受限於企業資料中心的規模。 同學們,你們同意這種說法嗎?
}}}

本文網址 : http://www.ibm.com/developerworks/opensource/library/os-cloud-anatomy/index.html?S_TACT=105AGX01&S_CMP=HP

The use of the ''cloud'' as an abstraction is quite common for the ''distributed system'' that is the Internet, but the past few years have seen this abstraction expanded to incorporate ''highly virtualized (KVM, VMware,..)'' and ''scalable infrastructures (Hadoop, GRID,..)'' that are easily provisioned (供應) as a service (either locally or remotely). 

{{item1{Cloud computing anatomy}}}
This article begins with an exploration of the core abstractions of cloud architectures (from Infrastructure as a Service [IaaS]), then moves beyond the building blocks to the more highly integrated solutions.

Although not a requirement, virtualization provides unique benefits for building dynamically scalable architectures. In addition to scalability, virtualization introduces the ability to migrate virtual machines (VMs) between physical servers for the purposes of load balancing. Figure 1 shows that the virtualization component is provided by a layer of software called a hypervisor (sometimes called a virtual machine monitor [VMM]). This layer provides the ability to execute multiple operating systems (and their applications) simultaneously on a single physical machine. On the hypervisor is an object called a virtual machine that encapsulates the operating system, applications, and configuration. Optionally, device emulation can be provided in the hypervisor or as a VM. Finally, given the new dynamic nature of virtualization and the new capabilities it provides, new management schemes are needed. This management is best done in layers, considering local management at the server, as well as higher-level infrastructure management, providing the overall orchestration of the virtual environment.
 
''Figure 1. Core elements of a node in the cloud''
[img[img/CloudNode.gif]]

If you take those nodes from Figure 1 and multiply them on a physical network with shared storage, orchestrating management over the entire infrastructure, then provide front-end load balancing of incoming connections (whether in a private or a public setting) with caching and filtering, you have a virtual infrastructure called a cloud. This new construction is shown in Figure 2. Dormant machines can be powered down until needed for additional compute capacity (providing better power efficiency), with VMs balanced (even dynamically) across the nodes depending upon their individual loads.

''Figure 2. Cloud computing infrastructure''
[img[img/CloudComputingInfrastructure.gif]]

With the basic architecture of a cloud defined, let's now explore where open source is being applied to build out a dynamic cloud infrastructure. 

{{item1{Core open source technologies}}}
The Linux landscape is seeing a wave of development focused on virtualized infrastructures for virtualization, management, and larger-scale integration of cloud software packages. Let's start with a view of open source at the individual node level, then step up to the infrastructure to see what's happening there.

{{op1{Hypervisors (超級監督者)}}}
The base of the cloud at the node level is the hypervisor. Although virtualization is not a requirement, it provides undisputed (確實的) capabilities for scalable and power-efficient architectures. There exist a number of open source virtualization solutions, but two key solutions are those that transform the Linux operating system into a hypervisor: the ''Linux Kernel Virtual Machine (KVM)'' and ''Lguest''. KVM is the official hypervisor solution, being deployed into production environments. Lguest is a Linux-focused solution that runs only Linux VMs, but is integrated into the kernel and finding wider use.

{{op1{Hypervisors are classified into two types:}}}
''- Type 1 (or native, bare-metal) hypervisors'' are software systems that run directly on the host's hardware to control the hardware and to monitor guest operating-systems. A guest operating system thus runs on another level above the hypervisor.

This model represents the classic implementation of virtual machine architectures; the original hypervisor was CP/CMS, developed at IBM in the 1960s, ancestor of IBM's current z/VM.

More recent examples include VMware ESX Server, INTEGRITY from Green Hills Software, LynxSecure from LynuxWorks, L4 microkernels including OKL4 from Open Kernel Labs, Real-Time Systems RTS-Hypervisor, VirtualLogix VLX, TRANGO (now VMware MVP), Microsoft Hyper-V (released in June 2008), IBM POWER Hypervisor (PowerVM), IBM System z Hypervisor (PR/SM), NeoCleus NeoSphere from Neocleus ,NxTop Engine from Virtual Computer, Xen, Citrix XenServer, Oracle VM Server, Parallels Server (released in 2008), ScaleMP vSMP Foundation (released in 2005) , Sun's Logical Domains Hypervisor (released in 2005), Wind River's hypervisor and VxWorks MILS Platform, XtratuM.:A variation of Type-1 hypervisors involves embedding the hypervisor in the firmware of the platform, as is done in the case of Hitachi's Virtage hypervisor and VMware ESXi. This group also includes Kernel-based Virtual Machine (KVM) which turns a Linux kernel into a hypervisor[citation needed].

''- Type 2 (or hosted) hypervisors'' are software applications running within a conventional operating-system environment. Considering the hypervisor layer as a distinct software layer, guest operating systems thus run at the third level above the hardware.

Examples include VMware Server (formerly known as GSX), VMware Workstation, VMware Fusion, the open source QEMU, Microsoft Virtual PC, Microsoft Virtual Server products, Oracle's VirtualBox and KUKA's [RTOSWin] [2] as well as Parallels Workstation and Parallels Desktop and TenAsys' eVM.

Note: Microsoft Hyper-V (released in June 2008)[2] is an example of a type 1 product that is often mistaken to be type 2. Both the free stand-alone version and the version that is part of the commercial Windows Server 2008 product use a virtualized Windows Server 2008 parent partition to manage the Type 1 Hyper-V hypervisor. In both cases the Hyper-V hypervisor is loaded prior to the management operating system, and any virtual environments created run directly on the hypervisor, not via the management operating system.

The term hypervisor apparently[original research?] originated in IBM's CP-370 reimplementation of CP-67 for the System/370, released in 1972 as VM/370. The term hypervisor call, or hypercall, referred to the paravirtualization interface, by which a guest operating system could access services directly from the (higher-level) control program – analogous to making a supervisor call to the (same level) operating system. The term supervisor refers to the operating system kernel, which runs in supervisor state on IBM mainframes.

{{op1{Device emulation}}}
The hypervisor provides the means to share the CPU with multiple operating systems (CPU virtualization), but to provide full virtualization, the entire environment must be virtualized for the VMs. Machine — or platform — emulation can be performed in a number of ways, but a popular open source package that supports a number of hypervisors is called QEMU. QEMU is a complete emulator and hypervisor. But KVM makes use of QEMU for device emulation as a separate process in the user space (see Figure 1). One interesting feature of QEMU is that because it provides disk emulation (through the QCOW format), QEMU provides other advanced features such as snapshots and live VM migration.

KVM, since kernel 2.6.25, uses virtio as a means of optimizing I/O virtualization performance. It does this by introducing paravirtualized drivers into the hypervisor with hooks from the guest to bring performance to near-native levels. This works only when the operating system can be modified for this purpose, but finds use in Linux guest on Linux hypervisor scenarios.

Today, virtio and QEMU work together so emulated device transactions can be optimized between the Linux guest and QEMU emulator in the user space.

{{op1{Virtual networking}}}
As VMs consolidate onto physical servers, the networking needs of the platform intensify. But rather than force all of the VM's networking to the physical layer of the platform, local communication could instead be virtualized itself. To optimize network communication among VMs, there is the introduction of the ''virtual switch''. The ''vSwitch'' behaves like a physical switch, but is virtualized into the platform (see Figure 3). In this figure, virtualized interfaces (VIFs) associated with the VMs communicate through the virtual switch to the physical interfaces (PIFs). 

''Figure 3. High-level view of Open vSwitch with virtual and physical interfaces''
[img[img/VirtualNetworking.gif]]

Open source is addressing this problem as well, with one very interesting solution called the Open vSwitch. In addition to providing a virtual switch for virtual environments, the vSwitch can also integrate across physical platforms and provide enterprise-level features like virtual local area networks (VLANs), priority-based Quality of Service (QoS), trunking, and support for hardware acceleration (such as single-root I/O virtualization [IOV] network adapters). The ''Open vSwitch'' is currently available for 2.6.15 kernels and supports the range of Linux-based virtualization solutions (Xen, KVM, VirtualBox) and management standards (Remote Switched Port Analyzer [RSPAN], NetFlow, etc.). 

{{op1{VM tools and technologies}}}
As VMs are an aggregation of operating system, root file system, and configuration, the space is ripe for tool development. But to realize the full potential of VMs and tools, there must be a portable way to assemble them. The current approach, called the ''Open Virtualization Format (OVF)'' is a VM construction that is flexible, efficient, and portable. OVF wraps a virtual disk image in an XML wrapper that defines the configuration of the VM, including networking configuration, processor and memory requirements, and a variety of extensible metadata to further define the image and its platform needs. The key capability provided by OVF is the portability to distribute VMs in a hypervisor-agnostic manner.

A number of utilities exist to manage ''VM images'' (VMIs) as well as convert them to and from other formats. The ovftool from VMware is a useful tool that you can use for VMI conversion (for example, to convert from the VMware Virtual Disk Development Kit [VMDK] format into OVF). This tool and others are useful once you have a VMI, but what if you have a physical server you'd like to convert into a VMI? You can employ a useful tool called Clonezilla for this purpose. Although it was originally developed as a disk-cloning tool for disaster recovery, you can use it to convert a physical server instance into a VM for easy deployment into a virtualized infrastructure. Numerous other tools exist (such as utilities built upon libvirt) or are in development for conversion and management as the OVF format gains adoption.

{{op1{Local management}}}
This article explores management from two perspectives. This section discusses platform management; a later section expands to infrastructure management at the higher level.

Red Hat introduced the ''libvirt library'' as an API for managing platform virtualization (hypervisors and VMs). What makes libvirt interesting is that it supports a number of hypervisor solutions (KVM and Xen being two) and provides API bindings for a number of languages (such as C, Python, and Ruby). It provides the "last mile" of management, interfacing directly with the platform hypervisor and extending APIs out to larger infrastructure-management solutions. With libvirt, it's simple to start and stop VMs, and it provides APIs for more advanced operations, such as migration of VMs between platforms. Using libvirt, it's also possible to use its shell (built on top of libvirt), called ''virsh''. 

{{item1{Infrastructure open source technologies}}}
Now that you've seen some of the open source solutions at the virtualized node level, look at some other open source applications that support this infrastructure. This article explores three categories. The first two are infrastructure-level technologies that complement the solutions previously discussed. The third category consists of integrated solutions that bring all of the pieces together for simpler deployment.

{{op1{I/O technologies}}}
Building a scalable and balanced Web architecture depends upon the ability to balance Web traffic across the servers that implement the back-end functionality. A number of load-balancing solutions exist, but recently, Yahoo! open sourced a solution called Traffic Server. Traffic Server is interesting, because it encapsulates a large number of capabilities in one package for cloud infrastructures, including session management, authentication, filtering, load balancing, and routing. Yahoo! initially acquired this product from Inktomi, but has now extended and introduced the product into open source.

{{op1{Infrastructure management}}}
Larger-scale infrastructure management (managing many hypervisors and even more VMs) can be accomplished in a number of ways. Two of the more common solutions are each built from the same platform (libvirt). The ''oVirt package'' is an open VM management tool that scales from a small number of VMs to thousands of VMs running on hundreds of hosts. The oVirt package, developed by Red Hat, is a Web-based management console that, in addition to traditional management, supports the automation of clustering and load balancing. The oVirt tool is written in the Python language. VirtManager, also based on libvirt and developed by Red Hat, is an application with a GTK+ UI (instead of being Web-based like oVirt). VirtManager presents a much more graphically rich display (for live performance and resource utilization) and includes a VNC client viewer for a full graphical console to remote VMs.

And ''Puppet'' is another open source package designed for data center infrastructure (a cloud). Although not designed solely for virtualized infrastructures, it simplifies the management of large infrastructures by abstracting the details of the peer operating system. It does this through the use of the Puppet language. Puppet is ideal for automating administrative tasks over large numbers of servers and is widely used today. 
 
{{item1{Integrated IaaS solutions}}}
The following open source packages take a more holistic approach by integrating all of the necessary functionality into a single package (including virtualization, management, interfaces, and security). When added to a network of servers and storage, these packages produce flexible cloud computing and storage infrastructures (IaaS). For details about these platforms, see Resources.

{{op1{Eucalyptus}}}
One of the most popular open source packages for building cloud computing infrastructures is Eucalyptus (for Elastic Utility Computing Architecture for Linking Your Programs to Useful Systems). What makes it unique is that its interface is compatible with Amazon Elastic Compute Cloud (Amazon EC2 — Amazon's cloud computing interface). Additionally, Eucalyptus includes ''Walrus'', which is a cloud storage application compatible with Amazon Simple Storage Service (Amazon S3 — Amazon's cloud storage interface).

Eucalyptus supports KVM/Linux and Xen for hypervisors and includes the ''Rocks cluster'' distribution for cluster management.

{{op1{OpenNebula}}}
OpenNebula is another interesting open source application (under the Apache license) developed at the Universidad Complutense de Madrid. In addition to supporting private cloud construction, OpenNebula supports the idea of hybrid clouds. A hybrid cloud permits combining a private cloud infrastructure with a public cloud infrastructure (such as Amazon) to enable even higher degrees of scaling.

OpenNebula supports Xen, KVM/Linux, and VMware and relies on elements like libvirt for management and introspection.

{{op1{Nimbus}}}
Nimbus is another IaaS solution focused on scientific computing. With Nimbus, you can lease remote resources (such as those provided by Amazon EC2) and manage them locally (configure, deploy VMs, monitor, etc.). Nimbus morphed from the Workspace Service project (part of Globus.org). Being dependent on Amazon EC2, Nimbus supports Xen and KVM/Linux.

{{op1{Xen Cloud Platform}}}
Citrix has integrated Xen into an IaaS platform, using Xen as the hypervisor while incorporating other open source capabilities such as the Open vSwitch. An interesting advantage to the Xen solution is the focus on standards-based management (including OVF, Distributed Management Task Force [DTMF], the Common Information Model [CIM], and Virtualization Management Initiative [VMAN]) from the project Kensho. The Xen management stack supports SLA guarantees, along with detailed metrics for charge-back.

{{op1{OpenQRM}}}
Last but not least is OpenQRM, which is categorized as a data center management platform. OpenQRM provides a single console to manage an entire virtualized data center that is architecturally pluggable to permit integration of third-party tools. OpenQRM integrates support for high availability (through redundancy) and supports a variety of hypervisors, including KVM/Linux, Xen, VMware, and Linux VServer. 

<<toBalaNotes "cloud">>

{{item1{Xen Hypervisor - Leading Open Source Hypervisor for Servers}}}
官方網址 : http://www.xen.org/

The Xen® hypervisor, the powerful open source industry standard for virtualization, offers a powerful, efficient, and secure feature set for virtualization of x86, x86_64, IA64, ARM, and other CPU architectures. It supports a wide range of guest operating systems including Windows®, Linux®, Solaris®, and various versions of the BSD operating systems.

Enterprises looking to increase server utilization, consolidate server farms, reduce complexity, and decrease total cost of ownership are embracing server virtualization. The Xen® hypervisor is the fastest and most secure infrastructure virtualization solution available today, supporting a wide range of guest operating systems including Windows®, Linux®, Solaris®, and various versions of the BSD operating systems.

With Xen virtualization, a thin software layer known as the Xen hypervisor is inserted between the server's hardware and the operating system. This provides an abstraction layer that allows each physical server to run one or more "virtual servers", effectively decoupling the operating system and its applications from the underlying physical server.

The Xen hypervisor is a unique open source technology, developed collaboratively by the Xen community and engineers at over 50 of the most innovative data center solution vendors, including AMD, Cisco, Dell, Fujitsu, HP, IBM, Intel, Mellanox, Network Appliance, Novell, Red Hat, Samsung, SGI, Sun, Unisys, Veritas, Voltaire, and Citrix. Xen is licensed under the GNU General Public License (GPL2) and is available at no charge in both source and object format. Xen is, and always will be, open sourced, uniting the industry and the Xen ecosystem to speed the adoption of virtualization in the enterprise.

The Xen hypervisor is also exceptionally lean-- less than 150,000 lines of code. That translates to extremely low overhead and near-native performance for guests. Xen re-uses existing device drivers (both closed and open source) from Linux, making device management easy. Moreover Xen is robust to device driver failure and protects both guests and the hypervisor from faulty or malicious drivers.

<<toBalaNotes "Xen">>

///%Xen
//%/
///%cloud
//%/
''參考文章''
1. Understanding the Cloud Landscape
http://broadcast.oreilly.com/2010/04/understanding-the-cloud-landscape.html
2. 雲端運算的儲存基礎架構 - 揭開雲端儲存的面貌 (必讀)
http://www.runpc.com.tw/content/cloud_content.aspx?id=105324

{{item1{虛擬化運作}}}
簡單來說,雲端運算就是將''運算''、''儲存及網路'',抑或''硬體''、''軟體''及''平台''等 IT 資源,透過''虛擬化''之資源利用最佳化,以及''可量化計費''的服務型態,經由網路分送,給使用者隨時存取的一種服務平台。 

該服務就像水電等公共設施一般,使用者不需了解其背後運作技術及狀況,企業用戶也不必耗費可觀的人力及管理成本,進行任何IT設備及資源的管理。所有資源的分配及管理,設備的汰換、更新與擴充,全都由雲端運算供應商負責一切,並依使用者需求提供可擴展性的高可用性服務,至於使用戶則只要按使用量付費即可。

事實上,雲端運算所採用的理論基礎與技術皆非全新,從過去以來的伺服器整合(Server Consolidation)、Web Service、服務導向架構(SOA)、公共運算(Utility Computing)、主機代管等服務或平台上,就已經可以看到與雲端運算概念相似的身影。這也是當前雲端運算一直沒有被明確定義的原因之一。

不論如何,隨著網路頻寬的提昇、Web 2.0 與 虛擬化技術 的日漸普及,雲端運算在上述各種有著相似概念技術、服務或平台長久所奠下的基礎上發展,開始愈受注目與青睞。

在許多技術服務當中,網格運算(Grid Computing)最常與雲端運算相提並論,雖然兩者皆採分散式運算架構,但事實上,卻有很大差異,其中尤以資源擴展性最為明顯。前者強調所有運算資源集中化,以因應需要大規模運算的應用任務,缺乏擴展彈性;後者適用於多重用戶之大量單一請求,並依不同個別需求調配資源,具備動態擴展能力。 

''[註] IaaS 服務大都使用虛擬化運作方式''

{{item1{實體化運作}}}
目前 Google 所擁有的雲端運算環境,每天要執行超過 10 萬個 MapReduce 任務,每天處理的資料量超過 20 PB。所使用的系統架構概略可以分成三個部分:包括 MapReduce 模式、BigTable 資料庫系統、以及最底層的GFS檔案系統。Google 為了節省處理大量資料的成本,自行開發出個人電腦等級的標準伺服器,包括 Linux 作業系統、雙核心 x86 處理器、4~8GB 記憶體、IDE 硬碟和 GbE 等級網路卡,例如有一款伺服器規格是 2 顆 Xeon 2GHz 處理器,搭配 4GB 記憶體及 2 臺 160GB 的 IDE硬碟。

''Google 自行設計便宜硬體來處理大量資料''
[img[img/googlecloud.jpg]] 

''系統架構''
[img[img/googlecloud01.png]]

''[註] PaaS 及 SaaS 服務, 目前使用 純實體化 或 實虛混合運作方式''

<<toBalaNotes "1">>

///%1
//%/
''參考文章''
1. 雲端運算與網路安全趨勢 部落格
http://domynews.blog.ithome.com.tw/category/1252/3984
2. Amazon EC2 使用操作筆記 (使用 Elasticfox)
http://plog.longwin.com.tw/my_note/2009/02/12/amazon-ec2-build-op-elasticfox-note-2009
3. Install Elastix on Amazon EC2
http://www.osslab.org.tw/VoIP/IP_PBX/%E8%BB%9F%E9%AB%94%E5%BC%8F_IP_PBX/Asterisk/Tips/Install_Elastix_on_Amazon_EC2
4. 阿正老師教你免費玩Amazon EC2雲端主機
http://blog.soft.idv.tw/?p=824&page=2

{{item1{Amazon EC2}}}
Amazon EC2 (Elastic Compute Cloud) 是一個讓使用者可以租用雲端電腦運行所需應用的系統。EC2 藉由提供 Web 服務的方式讓使用者可以彈性地運行自己的 Amazon 機器映象檔,使用者將可以在這個虛擬機器上運行任何自己想要的軟件或應用程式。

使用者可以隨時創建、執行、終止自己的虛擬伺服器,使用多少時間算多少錢,也因此這個系統是 "彈性" 使用的。EC2 讓使用者可以控制執行虛擬伺服器的主機地理位置,這可以讓延遲還有備援性最高。例如,為了讓系統維護時間最短,用戶可以在每個時區都運行自己的虛擬伺服器。Amazon.com 以 Amazon Web Services (AWS) 的品牌提供 EC2 的服務。

''Amazon 現有 365 天 99.95% 可用性的服務層級協定(SLA)''

<html><img src="img/amazon/amazon01.png" width="100%" height="100%" /> </html>

''使用案例''
[[1. Novell 以 Amazon EC2 推 Linux 作業系統雲端服務 (系統實體在 Amazon, 服務由 Novell 提供)|http://www.ithome.com.tw/itadm/article.php?c=62865]]

<<toBalaNotes "1">>

{{item1{Business process outsourcing (商業流程委外)}}}
網址 : http://zh.wikipedia.org/zh-tw/%E5%95%86%E6%A5%AD%E6%B5%81%E7%A8%8B%E5%A7%94%E5%A4%96

[img[img/xaas/cloudBPO.jpg]]

''Business process outsourcing (BPO)'' is a form of outsourcing that involves the contracting of the operations and responsibilities of specific business functions (or processes) to a third-party service provider. Originally, this was associated with manufacturing firms, such as Coca-Cola, which outsourced large segments of its supply chain[1]. In the contemporary context, it is primarily used to refer to the outsourcing of services.

{{item1{NEC's Ultimate SaaS Solution}}}
網址 : http://www.nec.com/global/solutions/nsp/saassolutions/saas_3.html

[img[img/xaas/NEC_saas.jpg]]

In addition to SaaS, NEC also provides Platform as a Service (PaaS) and Desktop as a Service (DaaS) - key components of NEC's cloud computing business solution for telecom operators.

<<toBalaNotes "2">>


///%1
//%/

///%2
//%/
{{item1{取得最新 Tiny Core Linux 的 核心檔 及 Root File System 檔}}}

''64 位元版下載網址 : ''http://distro.ibiblio.org/tinycorelinux/5.x/x86_64/release/distribution_files/
{{{
corepure64.gz	2014-Jan-25 06:16:33	6.7M 	application/x-gzip
corepure64.gz.md5.txt	2014-Jan-25 06:16:33	0K 	text/plain
modules64.gz	2013-Nov-29 01:42:46	3.6M 	application/x-gzip
modules64.gz.md5.txt	2013-Nov-29 01:42:46	0K 	text/plain
rootfs64.gz	2014-Jan-24 09:10:24	3.2M 	application/x-gzip
rootfs64.gz.md5.txt	2014-Jan-24 09:10:24	0K 	text/plain
vmlinuz64
}}}

''[註]'' corepure64.gz = rootfs64.gz + modules64.gz

''32 位元版下載網址 :'' http://distro.ibiblio.org/tinycorelinux/5.x/x86/release_candidates/distribution_files/

{{{
core.gz	2014-Jan-24 09:04:44	5.7M 	application/x-gzip
core.gz.md5.txt	2014-Jan-24 09:04:44	0K 	text/plain
modules.gz	2013-Nov-29 01:42:46	3.5M 	application/x-gzip
modules.gz.md5.txt	2013-Nov-29 01:42:46	0K 	text/plain
modules64.gz	2013-Nov-29 01:42:46	3.6M 	application/x-gzip
modules64.gz.md5.txt	2013-Nov-29 01:42:46	0K 	text/plain
rootfs.gz	2014-Jan-24 09:04:55	2.2M 	application/x-gzip
rootfs.gz.md5.txt	2014-Jan-24 09:04:55	0K 	text/plain
rootfs64.gz	2014-Jan-24 09:10:24	3.2M 	application/x-gzip
rootfs64.gz.md5.txt	2014-Jan-24 09:10:24	0K 	text/plain
vmlinuz	2013-Nov-29 01:45:05	2.8M 	application/octet-stream
vmlinuz64	2013-Nov-29 01:45:09	3.1M 	application/octet-stream
}}}

''[註]'' core.gz = rootfs.gz + modules.gz

''[注意]'' 下載檔案請存在家目錄

{{item1{直接由 Linux 核心檔 及 RAM 磁碟檔啟動}}}
{{{
$ kvm -m 512  -kernel vmlinuz -initrd core.gz 
}}}

系統啟動後, 執行 ''version 命令'', 確認 Tiny Core Linux 版本, 關閉系統請執行 ''poweroff 命令''

''qemu-kvm 啟動參數''
{{{
-kernel bzImage use 'bzImage' as kernel image
-initrd file use 'file' as initial ram disk
-append cmdline use 'cmdline' as kernel command line
}}}
<<toBalaNotes "1">>
{{item1{重製 Initial RAM disk 檔案內容}}}

''1.建立重製目錄''
{{{
$ cd 

$ mkdir tsc532
$ cd tsc532
}}}

''2. 解開 core.gz (initial RAM disk)''
{{{
# 解開 core.gz, 內容直接存在目前所在的目錄
$ zcat ../core.gz | sudo cpio -i -H newc

# 檢視解壓縮後內容
$ ll
總計 72
drwxrwxr-x 17 student student 4096  1月 31 14:04 ./
drwxr-xr-x 29 student student 4096  1月 31 14:03 ../
drwxr-xr-x  2 root    root    4096  1月 31 14:04 bin/
drwxrwxr-x  7 root    staff   4096  1月 31 14:04 dev/
drwxr-xr-x  8 root    root    4096  1月 31 14:04 etc/
drwxrwxr-x  2 root    staff   4096  1月 31 14:04 home/
-rwxr-xr-x  1 root    root     496  1月 31 14:04 init*
drwxr-xr-x  4 root    root    4096  1月 31 14:04 lib/
lrwxrwxrwx  1 root    root      11  1月 31 14:04 linuxrc -> bin/busybox*
drwxrwxr-x  2 root    staff   4096  1月 31 14:04 mnt/
drwxrwsr-x  2 root    staff   4096  1月 31 14:04 opt/
drwxrwxr-x  2 root    staff   4096  1月 31 14:04 proc/
drwxrwxr-x  2 root    staff   4096  1月 31 14:04 root/
drwxrwxr-x  3 root    staff   4096  1月 31 14:04 run/
drwxr-xr-x  2 root    root    4096  1月 31 14:04 sbin/
drwxrwxr-x  2 root    staff   4096  1月 31 14:04 sys/
drwxrwxrwt  2 root    staff   4096  1月 31 14:04 tmp/
drwxr-xr-x  7 root    root    4096  1月 31 14:04 usr/
drwxrwxr-x  8 root    staff   4096  1月 31 14:04 var/
}}}

''opt 目錄內容 : 系統登入前會被自動執行的程式''
{{{
$ tree -a opt
opt
├── bootlocal.sh
├── bootsync.sh
├── .filetool.lst
├── shutdown.sh
├── tcemirror
└── .xfiletool.lst

0 directories, 6 files
}}}

''etc/skel 目錄內容 : 系統登入後會被自動複製到登入使用者家目錄的程式 ''
{{{
$ tree -a etc/skel/
etc/skel/
├── .ash_history
├── .ashrc
└── .profile

0 directories, 3 files
}}}

''3.修改系統設定檔 (增加 alias 命令)''
{{{
$ sudo nano etc/skel/.ashrc 
# ~/.ashrc: Executed by SHells.
#
                     :
alias ping='ping -c 4'
alias bye='sudo poweroff'
}}}

''4.重製 initial RAM disk 檔案 (產生 tsc532.gz)''
{{{
$ find | sudo cpio -o -H newc | gzip -2 > ../tsc532.gz 
}}}

''5.測試系統''
{{{
$ kvm -name "tsc532" -m 128 -kernel ../vmlinuz -initrd ../tsc532.gz

在重製系統中, 輸入以下命令
$ bye
}}}

''@@color:blue;font-size:12pt;練習 : 請重製 64 位元的 initial RAM disk 檔案 (tsc564.gz)@@''
<<toBalaNotes "2">>
{{item1{在終端機文字模式, 啟動虛擬電腦}}}
{{{
cd ~/tsc532
$ kvm -name "tsc532" -m 128 -kernel ../vmlinuz -initrd ../tsc532.gz -nographic -curses
}}}

''-nographic :'' Normally, QEMU uses SDL to display the VGA output. With this option, you can totally disable graphical output so that QEMU is a simple command line application. The emulated serial port is redirected on the console. Therefore, you can still use QEMU to debug a Linux kernel with a serial console.

''-curses :'' Normally, QEMU uses SDL to display the VGA output. With this option, QEMU can display the VGA output when in text mode using a curses/ncurses interface. Nothing is displayed in graphical mode.




///%1
//%/

///%2
//%/
{{{
<<forEachTiddler
    where
        'tiddler.tags.contains("plugin")'
    write
        '""'
        end 'count+" Tiddlers found\n"'
        none '"No Tiddlers found\n"'
>>
}}}
The macro writes an empty string for every tiddler tagged "plugin", i.e. it writes nothing. 

Just at the end it writes the number of found tiddlers (using the ''end'' feature of the ForEachTiddler macro) or "No Tiddlers found" if no tiddler is tagged with "plugin" (using the ''none'' parameter).

''//Result://''
<<forEachTiddler
    where
        'tiddler.tags.contains("plugin")'
    write
        '""'
        end 'count+" Tiddlers found\n"'
        none '"No Tiddlers found\n"'
>>
{{item1{安裝 SUN JDK}}}
For Ubuntu 10.04 LTS, the ''sun-java6-jdk'' packages have been dropped from the Multiverse section of the Ubuntu archive. It is recommended that you use ''openjdk-6'' instead.

''[註] Sun Java moved to the Partner repository''

If you can not switch from the proprietary Sun JDK/JRE to OpenJDK, you can install sun-java6 packages from the Canonical Partner Repository. You can configure your system to use this repository via command-line:
{{{
$ sudo apt-get install python-software-properties

$ sudo add-apt-repository "deb http://archive.canonical.com/ lucid partner"
}}}

執行完上述命令後, 在 /etc/apt/sources.list 檔案的最後, 會自動加入下式 : 
{{{
$ deb http://archive.canonical.com/ lucid partner
}}}

重新更新套件資料庫
{{{
$ sudo apt-get update
}}}

安裝 SUN JDK6
{{{
$ sudo apt-get install sun-java6-jdk
}}}

{{item1{下載 Android AVD 可用套件}}}

[img[img/android/avd01.png]]

[img[img/android/avd02.png]]

[img[img/android/avd03.png]]

[img[img/android/avd04.png]]

[img[img/android/avd05.png]]

<<toBalaNotes "1">>


///%1
//%/

///%2
//%/

///%3
//%/


Apache2 is configured by placing directives in plain text configuration files. These directives are separated between the following files and directories:

* ''apache2.conf'': the main Apache2 configuration file. Contains settings that are global to Apache2.
* ''conf.d'': contains configuration files which apply globally to Apache2. Other packages that use Apache2 to serve content may add files, or symlinks, to this directory.
* ''envvars'': file where Apache2 environment variables are set.
* ''httpd.conf'': historically the main Apache2 configuration file, named after the httpd daemon. The file can be used for user specific configuration options that globally affect Apache2.
* ''mods-available'': this directory contains configuration files to both load modules and configure them. Not all modules will have specific configuration files, however.
* ''mods-enabled'': holds symlinks to the files in /etc/apache2/mods-available. When a module configuration file is symlinked it will be enabled the next time apache2 is restarted.
* ''ports.conf'': houses the directives that determine which TCP ports Apache2 is listening on.
* ''sites-available'': this directory has configuration files for Apache2 Virtual Hosts. Virtual Hosts allow Apache2 to be configured for multiple sites that have separate configurations.
* ''sites-enabled'': like mods-enabled, sites-enabled contains symlinks to the /etc/apache2/sites-available directory. Similarly when a configuration file in sites-available is symlinked, the site configured by it will be active once Apache2 is restarted.

In addition, other configuration files may be added using the ''Include'' directive, and wildcards can be used to include many configuration files. Any directive may be placed in any of these configuration files. Changes to the main configuration files are only recognized by Apache2 when it is started or restarted.

The server also reads a file containing mime document types; the filename is set by the ''TypesConfig'' directive, and is ''/etc/mime.types'' by default.

{{item1{列出 /etc/apache2 目錄檔案}}}
{{{
$ ls -al /etc/apache2/
總計 88
drwxr-xr-x   7 root root  4096 2010-07-08 18:22 .
drwxr-xr-x 131 root root 12288 2010-07-09 08:52 ..
-rw-r--r--   1 root root  8113 2010-04-14 03:27 apache2.conf
drwxr-xr-x   2 root root  4096 2010-07-09 10:27 conf.d
-rw-r--r--   1 root root   725 2010-04-14 03:27 envvars
-rw-r--r--   1 root root   229 2010-07-08 18:22 httpd.conf
-rw-r--r--   1 root root 31063 2010-04-14 03:27 magic
drwxr-xr-x   2 root root  4096 2010-07-06 16:27 mods-available
drwxr-xr-x   2 root root  4096 2010-07-08 18:18 mods-enabled
-rw-r--r--   1 root root   750 2010-04-14 03:27 ports.conf
drwxr-xr-x   2 root root  4096 2010-07-06 16:27 sites-available
drwxr-xr-x   2 root root  4096 2010-07-06 16:27 sites-enabled
}}}

{{item1{列出 /etc/apache2/mods-available/ 目錄檔案}}}
此目錄存放 Apache 2 可使用的功能模組, [[完整模組說明 (Module Index)|http://httpd.apache.org/docs/2.0/mod/#D]]
{{{
$ ls /etc/apache2/mods-available/
actions.conf          cache.load         filter.load          proxy_http.load
actions.load          cern_meta.load     headers.load         proxy.load
alias.conf            cgid.conf          ident.load           proxy_scgi.load
alias.load            cgid.load          imagemap.load        reqtimeout.conf
asis.load             cgi.load           include.load         reqtimeout.load
auth_basic.load       charset_lite.load  info.conf            rewrite.load
auth_digest.load      dav_fs.conf        info.load            setenvif.conf
authn_alias.load      dav_fs.load        ldap.load            setenvif.load
authn_anon.load       dav.load           log_forensic.load    speling.load
authn_dbd.load        dav_lock.load      mem_cache.conf       ssl.conf
authn_dbm.load        dbd.load           mem_cache.load       ssl.load
authn_default.load    deflate.conf       mime.conf            status.conf
authn_file.load       deflate.load       mime.load            status.load
authnz_ldap.load      dir.conf           mime_magic.conf      substitute.load
authz_dbm.load        dir.load           mime_magic.load      suexec.load
authz_default.load    disk_cache.conf    negotiation.conf     unique_id.load
authz_groupfile.load  disk_cache.load    negotiation.load     userdir.conf
authz_host.load       dump_io.load       proxy_ajp.load       userdir.load
authz_owner.load      env.load           proxy_balancer.load  usertrack.load
authz_user.load       expires.load       proxy.conf           version.load
autoindex.conf        ext_filter.load    proxy_connect.load   vhost_alias.load
autoindex.load        file_cache.load    proxy_ftp.load
}}}

{{item1{列出 /etc/apache2/mods-enabled/ 目錄檔案}}}
此目錄存放 Apache 2 已啟動的模組, 大都以連接檔格式存在
{{{
$ ls /etc/apache2/mods-enabled/
alias.conf            autoindex.conf  env.load          setenvif.load
alias.load            autoindex.load  mime.conf         status.conf
auth_basic.load       cgid.conf       mime.load         status.load
authn_file.load       cgid.load       negotiation.conf  userdir.conf
authz_default.load    deflate.conf    negotiation.load  userdir.load
authz_groupfile.load  deflate.load    reqtimeout.conf
authz_host.load       dir.conf        reqtimeout.load
authz_user.load       dir.load        setenvif.conf
}}}

<<toBalaNotes "module">>

///%module
//%/

''參考文章''
1. Understanding Hadoop Clusters and the Network (必讀)
http://bradhedlund.com/2011/09/10/understanding-hadoop-clusters-and-the-network/#download
2. Wu Peng Ta's BLOG
http://wupengta.blogspot.tw/2012/08/linux-kvm-hadoop.html

[img[img/hadoop/Hadoop-Server-Roles-s.png]]

<<toBalaNotes "1">>
[img[img/hadoop/Preparing-HDFS-Writes-s.png]]

[img[img/hadoop/HDFS-Pipleline-Write-s.png]]

<<toBalaNotes "2">>

[img[img/hadoop/Name-Node-s.png]]
<<toBalaNotes "3">>

[img[img/hadoop/Client-Read-from-HDFS-s.png]]
<<toBalaNotes "4">>

///%1
//%/

///%2
//%/

///%3
//%/

///%4
//%/
''參考文章''
1. Convert VMware .vmdk to KVM .qcow2 or Virtualbox .vdi
http://blog.bodhizazen.net/linux/convert-vmware-vmdk-to-kvm-qcow2-or-virtualbox-vdi/
2. Converting a VMWare image to Xen HVM
http://ian.blenke.com/vmware/vmdk/xen/hvm/qemu


本文網址 : http://www.linux-kvm.org/page/How_To_Migrate_From_Vmware_To_KVM

Starting at v0.12, Qemu-kvm has native support to VMware's disk images v6 (seems to be compatible with v7, used by VMware Server). So VMware images can be run with Qemu-kvm without any modification (make backups and do it at your own risks though !).

Look at your VMX configuration file:
{{{
* scsi0:0.fileName = "zimbra-000001.vmdk"
* uuid.bios = "56 4d 3f 3d 32 80 5b f2-94 31 21 c9 b2 c3 93 b9"
* ethernet0.generatedAddress = "00:0c:29:c3:93:b9" 
}}}

And then build the command-line:
{{{
kvm -drive file=zimbra-000001.vmdk,boot=on \
  -net nic,macaddr=00:0c:29:c3:93:b9 -net tap \
  -uuid 564d3f3d-3280-5bf2-9431-21c9b2c393b9
}}}
The UUID is optional, but might be useful for applications using it for validation (i.e. Windows), and the MAC address as well.

Second way could be to convert the disk image:
{{{
kvm-img convert -O qcow2 zimbra-000001.vmdk zimbra.qcow2
}}}

<<toBalaNotes "vm2kvm">>
本文網址 :https://help.ubuntu.com/community/KVM/FAQ

How to convert VMware machines to virt-manager?

To use the VMware machine from within virt-manager, the .vmx file must be converted to libvirt's .xml. vmware2libvirt was created to help with this. It's available in Intrepid/Jaunty, install package ''virt-goodies''. After installing it can be used like so:
{{{
$ vmware2libvirt -f ./file.vmx > file.xml
$ virsh -c qemu:///system define file.xml
}}}
The first command converts the VMware 'file.vmx' file to the libvirt compatible 'file.xml'. See  man vmware2libvirt  for details. The second command imports file.xml into libvirt. The imported .xml files are stored in /etc/libvirt/qemu.

''Caveats''

While vmware2libvirt works well on simple virtual machines, there are limitations because .vmx files don't always contain enough information, and also because vmware2libvirt tries not to make too many assumptions about what it finds. A couple of things to look out for:
1.

            While vmware2libvirt attempts to detect a 64-bit guest, be sure that your 64-bit guest has in its .xml file:

            <os>
             <type arch='x86_64' machine='pc'>hvm</type>
             ...
            </os>

2. vmware2libvirt only detects and uses the first ethernet interface found. Additional interfaces can be added from within virt-manager.

3. Currently the first scsi disk is used if found, otherwise the first ide disk. Additional disks can be added from within virt-manager.

4.The converted virtual machine is hard-coded to use 1 cpu. This can be changed with:

            <vcpu>2</vcpu>

5. vmware2libvirt does not (and cannot) convert anything that was VMware-specific within the guest. See 'Guest Notes' below for more details. 

''Using and Converting VMWare virtual Disk Files''
kvm has the ability to use VMWare's .vmdk disk files directly, as long as the disk is wholly contained in a single vmdk file.

But VMWare also allows splitting a disk into smaller, usually 2 GB, vmdk files. kvm can't use these. You can convert these files into a single virtual disk file using vmware-vdiskmanager. It is e.g. included in VMWare Server (freely available).

$ vmware-vdiskmanager -r <Name of splitted vmdk base file> -t 0 <Name of new single vmdk file>

Modify the virtual machines xml file in /etc/libvirt/qemu:
{{{
 ...
 <disk type='file' device='disk'>
      <source file='/var/lib/libvirt/images/diskname.vmdk'/>
      <target dev='hda' bus='ide'/>
 </disk>
...
}}}
and redefine it:
{{{
$ virsh -c qemu:///system define NameOfMachine.xml
}}}
IMPORTANT: keep in mind that while the .vmx file is converted to .xml, the disks are used as is. Please make backups, especially if you want to use the virtual machine in VMWare later.

kvm is not able to make snapshots when using vmdk disk files. So I recommend to convert the virtual disk file in qemu's format qcow2. Package qemu contains a utility qemu-img to do this:
{{{
qemu-img convert diskname.vmdk -O qcow2 diskname.qcow2
}}}
change the machines xml file and redefine it (see above).

''Should VMWare Tools be kept after conversion ?''
If converting from vmware to libvirt, be sure to remove ''vmware-tools'' if you have it installed (otherwise it will overwrite xorg.conf on reboot) 

///%vm2kvm
//%/
1. ubuntu kvm migrate nfs
http://l122428342.wordpress.com/2010/08/23/ubuntu-kvm-migrate-nfs/

{{item1{以​ virsh 來​進​行​即​時​ KVM 遷​移​}}}
本文網址 : http://docs.fedoraproject.org/zh-TW/Fedora/12/html/Virtualization_Guide/sect-Virtualization_Guide-KVM_live_migration-Live_KVM_migration_with_virsh.html

「​遷​移​」​代​表​將​虛​擬​化​客​座​端​由​一​部​主​機​移​至​另​一​部​主​機​上​的​程​序​。​遷​移​是​虛​擬​化​ 的​關​鍵​功​能​之​一​,因​為​軟​體​和​硬​體​是​完​全​分​離​的​。​遷​移​對​於​下​列​情​況​相​當​有​幫​助​:

* Load balancing - guests can be moved to hosts with lower usage when a host becomes overloaded.
* Hardware failover - when hardware devices on the host start to fail, guests can be safely relocated so the host can be powered down and repaired.
* Energy saving - guests can be redistributed to other hosts and host systems powered off to save energy and cut costs in low usage periods.
* Geographic migration - guests can be moved to another location for lower latency or in serious circumstances. 

您​可​進​行​即​時​或​離​線​遷​移​。​若​要​遷​移​客​座​端​,您​必​須​共​享​儲​存​裝​置​。​遷​移​就​是​將 ''客​座端​記​憶​體'' ​傳​送​至​目​標​主​機​上​。​共​享​儲​存​裝​置​可​儲​存​客​座​端​的​預​設​檔​案​系​統​。​檔​案​系統​映​像​檔​並​非​藉​由​網​路​來​從​來​源​主​機​上​傳​送​至​目​標​主​機​上​的​。​
An offline migration suspends the guest then moves an image of the guests memory to the destination host. The guest is resumed on the destination host and the memory the guest used on the source host is freed.
離​線​遷​移​程​序​的​所​需​時​間​取​決​於​網​路​頻​寬​和​延​遲​時​間​。​在​一​個​ 1 Gbit 的​乙​太​網​路​上​,遷​移​一​台​擁​有​ 2GB 記​憶​體​的​客​座​端​大​約​會​花​上​十​秒​鐘​左​右​的​時​間​。​

即​時​遷​移​會​讓​客​座​端​持​續​在​來​源​主​機​上​執​行​,並​且​在​不​停​用​該​客​座​端​的​情​況​下​,開​始​移​動​記​憶​體​。​當​映​像​檔​被​傳​送​時​,所​有​經​過​修​改​的​記​憶​體​分​頁​(memory page)皆​會​受​到​監​控​以​查​看​是​否​有​任​何​變​更​,並​傳​送​至​目​的​地​。​記​憶​體​會​隨​著​受​到​變​更​的​分​頁​一​起​更​新​。​這​項​程​序​會​持​續​進​行​,直​到​客​座​端​被​允​許​的​暫​停​時​間​與​最​後​幾​個分頁​傳​輸​的​預​計​時​間​相​等​。​KVM 會​估​計​判​斷​出​剩​餘​的​時​間​有​多​久​,並​嘗​試​將​最​大​量​的​分​頁​由​來​源​傳​輸​至​目​的​地​,直​到​ KVM 能​預​測​出​剩​下​的​分​頁​可​在​虛​擬​客​座​端​暫​停​時​,在​設​定​的​時​間​內​傳​輸​完​成​。​暫​存​器​會​被 ​載​入​到​新​的​主​機​上​,並​且​客​戶​端​將​會​在​目​標​主​機​上​復​原​。​若​客​座​端​無​法​合​併​的​話​(當 ​客​座​端​負​載​過​高​時​),客​座​端​將​會​暫​停​,並​且​離​線​遷​移​程​序​將​會​啟​動​。​離​線​遷​移​程​序​的​所​需​時​間​,取​決​於​網​路​頻​寬​和​延​遲​時​間​。​若​客​座​端​正​被​大​量​,使​用​或​是​頻​寬​過​低​的​話​,遷​移​程​序​將​會​花​上​較​久​的​時​間​。​ 

''[注意]'' 「​遷​移​」不是執行 image 檔案的複製, 而是 記憶體內容複製

1. 驗​證​客​座​端​是​否​正​在​運​作​
{{{
請​由​來​源​系​統​ test1.bne.redhat.com 上​驗​證​ CentOS4test 是​否​正​在​運​作​:

[root@test1 ~]# virsh list
Id Name                 State
----------------------------------
10 CentOS4                running
}}}

2. 遷​移​客​座​端​
{{{
執​行​下​列​指​令​來​將​客​座​端​即​時​遷​移​至​目​的​地​ test2.bne.redhat.com。​請​將​ /system 附​加​至​目​標​ URL 的​尾​端​來​讓​ libvirt 知​道​您​需​要​完​整​的​存​取​權​限​。​

# virsh migrate --live CentOS4test qemu+ssh://test2.bne.redhat.com/system

Once the command is entered you will be prompted for the root password of the destination system.
}}}

3. 等​待​
{{{
遷​移​程​序​可​能​會​根​據​客​座​端​的​負​載​和​大​小​而​花​上​一​段​時​間​。​virsh 只​會​回​報​錯​誤​。​客​座​端​會​繼​續​在​來​源​主​機​上​執​行​直​到​完​全​被​遷​移​。​
}}}

4. 驗​證​客​座​端​已​到​達​目​標​主​機​
{{{
請​由​目​標​系​統​ test2.bne.redhat.com 上​驗​證​ CentOS4test 是​否​有​在​運​作​:

[root@test2 ~]# virsh list
Id Name                 State
----------------------------------
10 CentOS4                running
}}}

<<toBalaNotes "1">>



///%1
//%/
參考文章
1. Universal TUN/TAP device driver.
http://www.kernel.org/doc/Documentation/networking/tuntap.txt
2. Tun/Tap interface tutorial
http://backreference.org/2010/03/26/tuntap-interface-tutorial/
3. Ubuntu Server Guide
https://help.ubuntu.com/12.04/serverguide/index.html
4. 認識邊緣網路架構 VEB、VN-link、VEPA技術介紹 (一定要看)
http://www.netadmin.com.tw/article_content.aspx?sn=1112070005
5. A nice overview of MacVTap (without boring details about not doing what you would expect)
http://virt.kernelnewbies.org/MacVTap
6. Configuring a Bridge in Linux (有片段文字, 說明為何產生 Bridge 會多一張網卡)
http://www.6test.edu.cn/~lujx/linux_networking/0131777203_ch12lev1sec3.html
7. Kernel Korner - Linux as an Ethernet Bridge
http://www.linuxjournal.com/article/8172
8. ebtables (根據 L2 的資訊去過濾封包)
http://ebtables.sourceforge.net/

{{item1{虛擬網卡 - TUN/TAP}}}
@@font-size:14pt;
The Tun/Tap user-space tunnel driver which was included in the Linux kernel as of version ''2.4'', also originally developed by ''Maxim Krasnyansky''. ''Bishop Clark'' is the current maintainer.
@@
{{op1{tun (network TUNnel) 虛擬的是 點對點 設備}}}
{{{
-simulates a network layer device
-layer 3 packets, such as IP packet
-used with routing
}}}
{{op1{tap (network TAP) 虛擬的是 乙太網路 設備}}}
{{{
-simulates an Ethernet device
-layer 2 packets, such as Ethernet frames
-used to create a network bridge
}}}

{{op1{安裝 TUN/TAP 管理套件}}}
TUN/TAP 原本就是 Linux 系統所提供的虛擬網路介面裝置,只是預設並不是每套 Linux 的發行版本都有內建此功能,以 Ubuntu 12.04 來說,預設就沒有安裝 TUN/TAP 的相關套件,所以要使用 TUN/TAP 來建立虛擬網路卡之前,則需要先來安裝操控 TAP 的相關套件。

操控 TUN/TAP 的套件名稱為 ”uml_utilities”,所以可以利用以下指令來安裝此套件:
{{{
$ tunctl
程式 'tunctl' 目前尚未安裝。  您可以由輸入以下內容安裝:
sudo apt-get install uml-utilities

$ sudo apt-get install uml-utilities
正在讀取套件清單... 完成
正在重建相依關係
正在讀取狀態資料... 完成
建議套件:
  user-mode-linux
下列【新】套件將會被安裝:
  uml-utilities
升級 0 個,新安裝 1 個,移除 0 個,有 0 個未被升級。
需要下載 61.9 kB 的套件檔。
此操作完成之後,會多佔用 267 kB 的磁碟空間。
下載:1 http://ftp.twaren.net/Linux/Ubuntu/ubuntu/ precise/universe uml-utilities amd64 20070815-1.3ubuntu1 [61.9 kB]
取得 61.9 kB 用了 1秒 (34.5 kB/s)
Selecting previously unselected package uml-utilities.
(正在讀取資料庫 ... 199076 files and directories currently installed.)
正在解開 uml-utilities (從 .../uml-utilities_20070815-1.3ubuntu1_amd64.deb)...
正在進行 ureadahead 的觸發程式 ...
ureadahead will be reprofiled on next reboot
正在進行 man-db 的觸發程式 ...
正在設定 uml-utilities (20070815-1.3ubuntu1) ...
 * Starting User-mode networking switch uml_switch                       [ OK ]
}}}

{{op1{設定 TAP 網路裝置}}}

''1. 產生 TAP 網路裝置''
{{{
$ sudo tunctl -u student
Set 'tap0' persistent and owned by uid 1000
}}}

''2. 手動設定 TAP 網卡的 MAC 位址''
{{{
$ ifconfig tap0 hw ether 4c:22:d0:b8:78:ae
SIOCSIFHWADDR: 此項操作並不被允許

$ sudo ifconfig tap0 hw ether 4c:22:d0:b8:78:ae
$ ifconfig tap0
tap0      Link encap:Ethernet  HWaddr 4c:22:d0:b8:78:ae  
          BROADCAST MULTICAST  MTU:1500  Metric:1
          RX packets:0 errors:0 dropped:0 overruns:0 frame:0
          TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:500 
          RX bytes:0 (0.0 B)  TX bytes:0 (0.0 B)
}}}

''3. 檢視 TAP 網卡硬體規格''
{{{
$ sudo lshw -class network | sed -n '/tap/,/driver=tun/p'
       logical name: tap0 
       serial: 4c:22:d0:b8:78:ae
       size: 10Mbit/s
       capabilities: ethernet physical
       configuration: autonegotiation=off broadcast=yes driver=tun driverversion=1.6 duplex=full firmware=N/A link=no multicast=yes port=twisted pair speed=10Mbit/s
}}}

''4. 移除 TAP 網路裝置''
{{{
$ tunctl -d tap0
Set 'tap0' nonpersistent
}}}
<<toBalaNotes "1">>
{{item1{虛擬橋接網路裝置 - Bridge}}}
Bridge 簡單來說,就是在電腦網路中,封包交換的技術,與 Router 不一樣的地方,在於 bridge 預設並不知道特定裝置的所在地,而是利用 flooding 的方式來廣播封包,等到查詢的裝置回應之後,就會記錄裝置的 MAC address,以避免再次傳送大量封包來尋找。

而 Bridge 在最開始的時候,收到要交換轉送的封包時,會去檢查封包中 MAC address 的資訊,並根據這個 MAC address 去查詢自己的轉送表(forwarding table),而此資料庫存放著 MAC address 與對應出入埠的資訊,這個資料庫內的資訊是怎樣建立的呢?其實是學習而來的。

舉例來說,現在有三台電腦(Host),A、B、C 與一組 bridge,而此 bridge 有三個連接埠port,而 A、B 與 C 則分別連結到其中一個的 bridge port,而當 A 要傳送訊息到 B 的時候,bridge會檢查這個來源位置,並且與所連結的 bridge port number 的資訊,一併記錄在轉送表中,接著會去檢查目的位置的資訊是否存在轉送表內,如果沒有,那就會利用 flooding 的方式來廣播封包到其他的 bridge port,而這樣的方式,也就是大家所熟悉的廣播風暴(Broadcast)。

封包傳送到 Host B 與 C 之後,Host C 會直接忽視並丟棄此封包,而 Host B 辨識此目的位置是自己之後,就會送出回應訊息給 Host A,而 bridge 在轉送回應訊息封包的時候,就會將 Host B 與對應的連接埠,以及與 Host A 的連線資訊記錄在轉送表之中,之後 Host A 與 Host B 之間的連線,就會直接建立,bridge 也不會在傳送廣播封包給其他的 Host,而這也就是 bridge 學習與建立轉送表的過程。

''※ 區域網路的保護機制:Spanning-Tree Protocol''

然而,相信大家都知道,在區域網路內,廣播風暴是很佔頻寬的,且在區域網路內,也蠻容易因為某些因素而產生網路迴路(迴圈),所以為了避免發生這樣的狀況,則必須有一個保護與協調的機制,而這就是 Spanning-Tree Protocol(STP)。

Spanning-Tree Protocol(STP)是一種Level 2的網路通訊協定,主要作用在 Bridge(橋接器)或是 Switch(交換器)上,其最主要的目的,是當使用 Bridge 或 Switch 連結成網路時,來避免因為 Redundant Path(額外路徑或冗餘路徑)的機制,而造成網路迴路(loop)的狀況,以確保兩個節點(連接點)之間只有一條聯繫路徑存在。所以當 Bridge 或 Switch 有使用 STP 協定的時候,就能提供一個無迴圈的網路環境,也可以避免廣播封包情形的發生,保持區域網路的暢通。

{{op1{產生橋接網路裝置}}}
{{{
$ sudo brctl addbr mybr
}}}

''brctl'' : 產生橋接網路裝置的指令
''addbr'' : 產生橋接網路裝置,後方接著為裝置名稱,如 mybr

{{op1{查看橋接網路裝置 (mybr) 的資訊}}}
{{{
$ brctl show mybr
bridge name	bridge id		STP enabled	interfaces
mybr		8000.000000000000	no		
}}}

''[註]'' 新建橋接網路裝置 mybr, 內定沒啟動 STP 功能, "bridge id" 中的 8000 是 id, 可是句點 (.) 後面是橋接裝置的 MAC 位址, 內定是 00:00:00:00:00:00  

{{op1{橋接網路裝置的內部網卡 (網卡名稱與橋接網路裝置同名)}}}
{{{
$ ifconfig mybr
mybr      Link encap:Ethernet  HWaddr 32:67:98:ec:f8:83  
          BROADCAST MULTICAST  MTU:1500  Metric:1
          RX packets:0 errors:0 dropped:0 overruns:0 frame:0
          TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:0 
          RX bytes:0 (0.0 B)  TX bytes:0 (0.0 B)	
}}}

''[註]'' 內部網卡主要作為橋接網路裝置所建立網段封包管理之用. 如內部網卡沒啟動, 橋接網路裝置可不可以使用 ?  

{{op1{移除橋接網路裝置}}}
{{{
$ sudo brctl delbr mybr
}}}
<<toBalaNotes "2">>

///%1
//%/

///%2
//%/

///%3
//%/
''參考文章''
1. Linux-KVM: Taking Advantage of Memory Deduplication
http://blog.allanglesit.com/2011/03/linux-kvm-taking-advantage-of-memory-deduplication/
2. How to use the Kernel Samepage Merging feature
http://www.mjmwired.net/kernel/Documentation/vm/ksm.txt
3. Anatomy of Linux Kernel Shared Memory (一定要讀)
http://www.ibm.com/developerworks/linux/library/l-kernel-shared-memory/index.html?ca=dgr-lnxw01LX-KSMdth-LX
4. Increasing memory density by using KSM (KSM 創作者所寫的文章)
http://www.kernel.org/doc/ols/2009/ols2009-pages-19-28.pdf
5. Manage resources on overcommitted KVM hosts
http://www.ibm.com/developerworks/linux/library/l-overcommit-kvm-resources/
6. Automatic Memory Ballooning with MOM
http://aglitke.wordpress.com/2011/03/03/automatic-memory-ballooning-with-mom/

@@font-size:14pt;
Kernel Shared Memory 有時也被稱為 Kernel Samepage Merging 是在 linux 2.6.32 被加入 main tree 裡,簡單來說,

它的目地如 XEN 的 Memory CoW 與 VmWare 的 Transparent Page Sharing 一樣,把相同內容的 page merge 在一起, 

就是做 de-duplication 的動作, 讓記憶體空間更有效的被利用,當然相對地,需要花更多的 CPU 效能來做這些事,

不過相對上來說,應該還是值得的。@@

{{item1{KSM (Kernel Samepage Merging)}}}
本文網址 : http://www.linux-kvm.com/content/using-ksm-kernel-samepage-merging-kvm

Kernel SamePage Merging is a recent linux kernel feature which combines identical memory pages from multiple processes into one copy on write memory region. Because kvm guest virtual machines run as processes under linux, this feature provides the memory overcommit feature to kvm so important to hypervisors for more efficient use of memory. So if you need to run multiple virtual machines on a host where memory is a constraint, then KSM is your solution. Even in cases where memory is not a constraint you might have a particular application that you want separated onto individual virtual machines and want maximum scalability. One case that might come to mind is webhosting where you might want to maximize scalability. ''Redhat claims that tests with KSM were able to achieve 600 vms on a host with 48 cores and 256 GB RAM''.

[img[img/kvm/ksm.gif]]

{{item1{Verify Kernel KSM Support}}}
First thing you want to do is verify that KSM is available and enabled in your kernel. Future releases of the kernel will enable this by default but you should verify this at least for now. Run the following command to verify that KSM is enabled in your kernel.
{{{
$ grep KSM /boot/config-`uname -r`
CONFIG_KSM=y
}}}
You will also see a subdirectory for KSM under the /sys/kernel/mm/ksm filesystem showing in-kernel values related to KSM.
{{{
$ ls -al /sys/kernel/mm/ksm/
總計 0
drwxr-xr-x 2 root root    0 2010-09-20 23:23 .
drwxr-xr-x 4 root root    0 2010-09-20 23:23 ..
-r--r--r-- 1 root root 4096 2010-09-20 23:23 full_scans
-rw-r--r-- 1 root root 4096 2010-09-20 23:23 max_kernel_pages       (Ubuntu 10.10 找不到此檔)
-r--r--r-- 1 root root 4096 2010-09-20 23:23 pages_shared
-r--r--r-- 1 root root 4096 2010-09-20 23:23 pages_sharing
-rw-r--r-- 1 root root 4096 2010-09-20 23:23 pages_to_scan
-r--r--r-- 1 root root 4096 2010-09-20 23:23 pages_unshared
-r--r--r-- 1 root root 4096 2010-09-20 23:23 pages_volatile
-rw-r--r-- 1 root root 4096 2010-09-20 18:13 run
-rw-r--r-- 1 root root 4096 2010-09-20 23:23 sleep_millisecs
}}}

''full_scans''
{{{
The value of full_scans indicates the number of times that all shareable memory areas have been scanned. If this number changes while other numbers (such as pages shared) do not, that indicates that KVM is not finding any new shareable memory even though it is looking for additional memory to share.
}}}
''pages_shared (實際共享 Page 的總數)''
{{{
The value of pages_shared indicates how many pages KSM is using to back the shared pool that it has built up. When you multiply this value by the page size (usually 4 KB), you can determine the total amount of memory that KSM is using.
}}}
''pages_sharing (加總各個虛擬電腦的共享 Page)''
{{{
The value of pages_sharing is the number of guest memory pages that KSM has shared using the shared pool. Some pages, such as randomized or encrypted data, can be shared few times, if at all. Other pages, such as zeroed pages, can be shared many times. KSM efficiency can be calculated by dividing the pages_sharing value by the pages_shared value. A higher result indicates more times that a page is shared by KSM, and a greater efficiency of KSM. 
}}}
''pages_to_scan''
{{{
The value of pages_to_scan determines how many memory pages KSM will analyze for sharing potential on each pass. The higher this value is, the more aggressively KSM has been configured to share memory. If the ksmtuned service is running, it will manipulate this value as needed. 
}}}
''pages_unshared''
{{{
The value of pages_unshared indicates how many pages KSM is scanning that cannot be shared because they are unique. KSM's wasted effort can be evaluated by dividing the pages_unshared value by the pages_sharing value. A lower result indicates less wasted effort on the part of KVM, and better KVM performance.
}}}
''pages_volatile''
{{{
The value of pages_volatile indicates the number of pages that have content that is changing too rapidly for memory sharing. If the number of volatile pages is high, that is an indication that the running guests are not good candidates for memory sharing. 
}}}
''run''
{{{
The value of run indicates the current state of KSM.

    0
        Indicates that KSM is not currently running, but any previously shared pages will remain shared.
    1
        Indicates that KSM is currently active and attempting to share additional memory.
    2
        Indicates that KSM has been stopped and all previously shared pages have been unshared. 
   
If the ksmtuned or ksm services are running, they will manipulate this value as needed.
}}}
''sleep_millisecs''
{{{
The value of sleep_millisecs value indicates how long KSM will sleep in between each scanning pass. The lower this value is the more aggressively KSM has been configured to share memory. If the ksmtuned service is running, it will manipulate this value based on the amount of system memory.
}}}
The aggressiveness of KSM memory scanning is determined by both the pages_to_scan and sleep_millisecs parameters. These two parameters combine to determine how often KSM wakes up to scan memory and for how long. The efficiency of KSM can be measured through analysis of the pages_shared, pages_sharing, pages_unshared, and pages_volatile metrics. You can experiment with KSM and determine your own appropriate thresholds for evaluating KSM based on these values and the calculations described.
<<toBalaNotes "ksm">>

{{item1{KSM Now Enabled in Ubuntu Lucid}}}
http://blog.dustinkirkland.com/2010/02/ksm-now-enabled-in-ubuntu-lucid.html

''I just uploaded a qemu-kvm package that enables KSM  by default on Ubuntu Lucid.''

''KSM'' is a [[bacronym|http://en.wikipedia.org/wiki/Bacronym]], for ''Kernel SamePage Merging''. Previously KSM stood for Kernel Shared Memory. KSM is a new feature of KVM, which can provide more efficient memory utilization on virtualization hosts. Basically, the host kernel tracks identical pages in memory, and stores only one copy when possible. If you're running several basically identical virtual machines, then you will likely have some identical pages in memory.

Ubuntu inherited these features from upstream with the merge of the ''Linux 2.6.32 kernel'' and the ''qemu-kvm 0.12.2'' package. Fedora 12 shipped with the KSM kernel pieces backported to their kernel.

''You can disable KSM, if you like, by editing /etc/default/qemu-kvm and then restarting qemu-kvm with sudo restart qemu-kvm.''
{{{
$ nano /etc/default/qemu-kvm
# To disable qemu-kvm's page merging feature, set KSM_ENABLED=0 and
# sudo restart qemu-kvm

KSM_ENABLED=1
#SLEEP_MILLISECS=2000
}}}
I did a bit of very rough testing of KSM in a test deployment of Ubuntu Enterprise Cloud. I had 1 Eucalyptus Node, a simple laptop, with a dual-core 2.4GHz and 4GB of memory. I registered a single Ubuntu 9.10 64-bit server image, and started instances with 256MB of memory apiece.

''With KSM disabled:''
{{{
* Running 0 VMs, the Node's memory utilization was steady around 12%
* Running 1 VM, the Node's memory utilization was steady around 18%
* Running 14VMs, the Node's memory utilization spiked and stabilized at 88%
}}}

''With KSM enabled:''
{{{
* Running 0 VMs, the Node's memory utilization was steady around 12%
* Running 1 VM, the Node's memory utilization was steady around 18%
* Running 14VMs, the Node's memory utilization was steady at 60%
  - with 18,000 - 20,000 pages shared
}}}
It looks to me that KSM "saved" me about 28% of my host's memory, which is a little over a gigabyte.

{{item1{測試 KSM}}}

''1. 測試前檢查''
{{{
- Upgrade to the latest qemu-kvm package
- Make sure that /sys/kernel/mm/ksm/run is set to 1 (the qemu-kvm upstart job will do this for you now)
}}}

''2. 啟動多部 Live CD 虛擬主機''
{{{
$ kvm -m 384 -cdrom multicore_4.0.iso -boot d&
}}}

''3. 檢視 KSM 運作資訊''
單行命令顯示所有檔案內容
{{{
# for ii in /sys/kernel/mm/ksm/* ; do echo -n "$ii: " ; cat $ii ; done
/sys/kernel/mm/ksm/full_scans: 151
/sys/kernel/mm/ksm/max_kernel_pages: 246793
/sys/kernel/mm/ksm/pages_shared: 92112
/sys/kernel/mm/ksm/pages_sharing: 131355
/sys/kernel/mm/ksm/pages_to_scan: 100
/sys/kernel/mm/ksm/pages_unshared: 123942
/sys/kernel/mm/ksm/pages_volatile: 1182
/sys/kernel/mm/ksm/run: 1
/sys/kernel/mm/ksm/sleep_millisecs: 20
}}}

[註] 每啟動一部虛擬電腦, pages_shared 及 pages_sharing 的值均會增加

''以 Mb 為單位顯示 pages_sharing 的值''
{{{
$ echo "KSM pages shared: $(( $(cat /sys/kernel/mm/ksm/pages_sharing) * $(getconf PAGESIZE) / 1024 / 1024 ))Mb"
}}}

Below is a small script (called ksm_stat) which I wrote in order to see how much memory is "shared" and how much memory is actually being saved by using this feature.
{{{
#!/bin/bash
# ksm_stat - report how much memory KSM (Kernel Samepage Merging) is
# sharing/saving on this host, plus sharing-efficiency ratios.
# Reads the kernel's KSM counters from /sys/kernel/mm/ksm/.

# KSM must be active (run == 1) for the counters to be meaningful.
if [ "$(cat /sys/kernel/mm/ksm/run)" -ne 1 ]; then
    # Fixed: the original closed the quote early, leaving "to enable it."
    # as stray unquoted arguments; quote the whole sentence instead.
    echo 'KSM is not enabled. Run "echo 1 > /sys/kernel/mm/ksm/run" to enable it.'
    exit 1
fi

page_size=$(getconf PAGE_SIZE)

# pages_shared  = distinct pages backing the shared pool;
# pages_sharing = guest pages mapped onto that pool.
echo "Shared memory is $(( $(cat /sys/kernel/mm/ksm/pages_shared) * page_size / 1024 / 1024 )) MB"
echo "Saved memory is $(( $(cat /sys/kernel/mm/ksm/pages_sharing) * page_size / 1024 / 1024 )) MB"

# Fixed: the original wrapped this test in backticks, so the (empty)
# output of `type` was substituted as the command and the check for a
# missing bc could never fire.
if ! type bc &>/dev/null; then
    echo "bc is missing or not in path, skipping ratio calculation"
    exit 1
fi

pages_sharing=$(cat /sys/kernel/mm/ksm/pages_sharing)
if [ "$pages_sharing" -ne 0 ]; then
    # sharing/shared: how many times an average pooled page is reused.
    echo -n "Shared pages usage ratio is "
    echo "scale=2; $pages_sharing / $(cat /sys/kernel/mm/ksm/pages_shared)" | bc -q
    # unshared/sharing: scanning effort wasted on unique pages.
    echo -n "Unshared pages usage ratio is "
    echo "scale=2; $(cat /sys/kernel/mm/ksm/pages_unshared) / $pages_sharing" | bc -q
fi
}}}

Example of a machine where it just has been enabled, so it takes a while until all pages are scanned
{{{
# ksm_stat
Shared memory is 23 MB
Saved memory is 76 MB
Shared pages usage ratio is 3.23
Unshared pages usage ratio is .45
}}}
<<toBalaNotes "1">>

{{item1{Getting an accurate view of process memory usage on Linux hosts}}}
本文網址 : http://prefetch.net/blog/index.php/2010/07/02/getting-an-accurate-view-of-process-memory-usage-on-linux-hosts/

Having debugged a number of memory-related issues on Linux, one thing I’ve always wanted was a tool to display proportional memory usage. Specifically, I wanted to be able to see how much memory was unique to a process, and have an equal portion of shared memory (libraries, SMS, etc.) added to this value. My wish came true a while back when I discovered the smem utility. When run without any arguments, smem will give you the resident set size (RSS), the unique set size (USS) and the proportional set size (PSS) which is the unique set size plus a portion of the shared memory that is being used by this process. This results in output similar to the following:
{{{
# smem -r
  PID User     Command                         Swap      USS      PSS      RSS 
 2879 libvirt-qemu /usr/bin/kvm -S -M pc-0.12         0    58620    84779   132140 
 4870 libvirt-qemu /usr/bin/kvm -S -M pc-0.12         0    51108    76936   120816 
 2009 root     /usr/bin/nxagent -D -name N        0    29816    31798    35108 
 2688 root     python /usr/local/share/vir        0    18996    20760    31908 
 1128 root     /usr/bin/X :0 -br -verbose         0    17076    17503    19368 
 2102 root     nautilus                           0    14404    16966    30752 
 2222 root     /usr/bin/python /usr/share/        0     8004     8824    16620 
                                                  :
}}}
To calculate the portion of shared memory that is being used by each process, you can add up the shared memory per process (you would probably index this by the type of shared resource), the number of processes using these pages, and then divide the two values to get a proportional value of shared memory per process. This is a very cool utility, and one that gets installed on all of my systems now!


///%ksm
//%/

///%1
//%/
''參考文章''
1. KVM network scripts
http://blog.bodhizazen.net/linux/kvm_network_scripts/
2. Libvirt Virtual Networking (@@color:red;很重要:有圖示說明@@)
http://wiki.libvirt.org/page/VirtualNetworking 

{{item1{建立 Libvirt 虛擬網路}}}

''1. 輸入網路名稱''

[img[img/kvm/mybr01.png]]

''2. 輸入 Network ID''

[img[img/kvm/mybr02.png]]

''3. 啟動 DHCP 服務''

[img[img/kvm/mybr03.png]]

@@color:red;''[註]'' 記得要將後面幾個 IP 位址保留給網路設備, 將 254 改成 249 @@

''4. 選擇隔離網路''

[img[img/kvm/mybr04.png]]

''5. 檢視最終設定''

[img[img/kvm/mybr05.png]]

''[註]'' 最後在 KVM 主戰機中, 會自動產生 virbr1 網路介面, IP 位址會是所設 Network ID 的第一個位址, 如上例, IP 位址為 192.168.100.1, 當配置 IP 位址給 Client 端, 還會一併指定 192.168.100.1 是 Client 端的 ''Default Gateway'', ''DNS Server'' 

<<toBalaNotes "1">>
{{item1{解析 Libvirt 網路架構}}}

''1. 新建 Bridge 網路設備''
上述所建立的網路架構, 主戰機會自動產生一片網卡 (virbr1), 連接管理這個 Bridge 網路設備, 這樣的網路架構與 VMWare 的 Host-Only 虛擬網路是一樣的.而 virbr1 是一個 Bridge, 由以下命令得知 : 
{{{
$ brctl show
bridge name	bridge id		STP enabled	interfaces
virbr0		8000.000000000000	yes		
virbr1		8000.000000000000	yes	
}}}

''2. 新建二片 Bridge 專屬網卡''

[img[img/kvm/mybr06.png]]

''3. 網路功能設定 (iptables)''

至於虛擬網路的功能 (NAT, Routed), 則是由 iptables 來執行, 檢視 iptables 規則, 命令如下 :

[img[img/kvm/mybr07.png]]

''4. 新建 DHCP 服務 (dnsmasq)'' 

[img[img/kvm/mybr08.png]]

<<toBalaNotes "2">>

///%1
//%/

///%2
//%/
''參考文章''
1. ARM聯盟宣佈成立Linaro
http://www.linuxpilot.com/industry/news/kiji/20100606
2. Linaro誕生 Linux平台大戰白熱化 ?
http://www.eettaiwan.com/ART_8800608863_676964_NT_25ea892d.HTM

官方網址 : http://www.linaro.org/

英國IC 設計公司安謀(ARM Holdings Plc)2日發布新聞稿宣布,該公司與飛思卡爾半導體(Freescale Semiconductor)、IBM、三星電子(Samsung  Electronics)、ST-Ericsson與德州儀器(Texas Instruments Incorporated)攜手成立非營利的開放原始碼軟體工程公司「Linaro」,主要目的在活絡開放原始碼的創新事業,以便迎接下一波「持續連線、隨時開機(always-connected, always-on)」的運算時代來臨。根據新聞稿,Linaro將幫助程式開發商與硬體製造商提供消費者更多選擇、更多感應裝置以及更多樣化的Linux系統應用程式。

安謀指出,Linaro 將集業界領導電子廠商的專業,加速Linux開發商對系統單晶片 (SoC) 的開發速度;能夠持續連線、隨時開機的電子裝置需要複雜的SoC才能達成消費者要求的運算效能與低耗電量。

根據新聞稿,Linaro 將對開放原始碼計畫投入資源,而這些計畫之後則可運用於 Android、LiMo、MeeGo、Ubuntu與 webOS等基於 Linux 系統產品推廣。此外,Linaro也計畫每6個月釋出獲得多種 SoC 驗證的最佳化工具、核心原始碼(kernel) 以及中介軟體(Middleware),以便為通路商、開發商提供穩定且最佳化的基礎。

Linaro的軟體與工具平台將適用於多種市場,可縮短智慧型手機、平板電腦、數位電視、車內娛樂與企業設備等產品的上市時間。安謀指出,Linaro的首批軟體與工具預定將在2010年11月釋出,並將為最新款ARM CortexTM-A系列處理器提供最佳化程式。

<<toBalaNotes "linaro">>


///%linaro
//%/
{{item1{Linux 雲端航母系統架構}}}

[img[img/ubkvm1204.png]]

<<toBalaNotes "1">>
{{item1{安裝 Linux 雲端航母系統}}}
''1. 將 UB1204.zip 解壓縮至 C 磁碟機的根目錄''

''2. 使用 VMware Workstation 開啟 UB1204 虛擬主機, 並檢視是否可使用 VT-x 虛擬技術''

[img[img/hadoop/ubhd12/ubhd01.png]]

''3. 使用 VMware Workstation 啟動 UB1204 虛擬主機''
務必點選 "I copied it", 產生新的虛擬主機

[img[img/hadoop/ubhd12/ubhd02.png]]

''4. 登入 UB1204 虛擬主機''
登入帳號是 student, 密碼是 student

[img[img/ub120401.png]]

<<toBalaNotes "2">>


///%1
//%/

///%2
//%/

{{item1{下載 XAMPP}}}

''$ wget  http://sourceforge.net/projects/xampp/files/XAMPP%20Linux/1.8.1/xampp-linux-1.8.1.tar.gz/download''
{{{
--2009-09-01 00:20:56--  http://192.168.200.1/xampp-linux-1.7.3a.tar.gz
正在連接 192.168.200.1:80... 連上了。
已送出 HTTP 要求,正在等候回應... 200 OK
長度: 65092947 (62M) [application/octet-stream]
Saving to: `xampp-linux-1.7.2.tar.gz'

100%[================================================>] 65,092,947  3.34M/s   in 17s

2009-09-01 00:21:14 (3.64 MB/s) -- 已儲存 ‘xampp-linux-1.7.3a.tar.gz’ [65092947/65092947])
}}}

{{item1{安裝 XAMPP}}}
{{{
$ su
密碼:
# tar xvfz xampp-linux-1.7.3a.tar.gz -C /opt
}}}

{{item1{啟動 XAMPP}}}

''# /opt/lampp/lampp start''
{{{
Starting XAMPP for Linux 1.7.3a...
XAMPP: Starting Apache with SSL (and PHP5)...
XAMPP: Starting MySQL...
XAMPP: Starting ProFTPD...
XAMPP for Linux started.
}}}

[[1.Tiny Core 網路設定 |http://linuxkvm.blogspot.com/2011/07/tiny-core_24.html]]

[[2. Tiny Core 套件安裝 |http://linuxkvm.blogspot.com/2011/07/tiny-core_30.html]]

[[3. Tiny Core SSH Server |http://linuxkvm.blogspot.com/2011/07/tniy-core-ssh-server.html]]

[[4. Tiny Core 自動登入 root 帳號 |http://linuxkvm.blogspot.com/2011/07/tiny-core-root.html]]

<<toBalaNotes "1">>


///%1
//%/
官方網址 : http://www.tinyos.net/

{{item1{TinyOS 無線傳感網路設計平台}}}
''TinyOS'' is an open-source operating system designed for ''wireless embedded sensor networks''. It features a component-based architecture which enables rapid innovation and implementation while minimizing code size as required by the severe memory constraints inherent in sensor networks. TinyOS's component library  includes network protocols, distributed services, sensor drivers, and data acquisition tools – all of which can be used as-is or be further refined for a custom application. TinyOS's event-driven execution model enables fine-grained power management yet allows the scheduling flexibility made necessary by the unpredictable nature of wireless communication and physical world interfaces.

TinyOS has been ported to over a dozen platforms and numerous sensor boards. A wide community uses it in simulation to develop and test various algorithms and protocols. New releases see over 10,000 downloads. Over 500 research groups and companies are using TinyOS on the Berkeley/Crossbow Motes. Numerous groups are actively contributing code to the sourceforge site and working together to establish standard, interoperable network services built from a base of direct experience and honed through competitive analysis in an open environment.

Want to get started with TinyOS? Check out a fraction of the hundreds of TinyOS projects on our Related Work page. Then, get started on your own project at the Download page. 

<<toBalaNotes "tinyos">>

本文網址 : http://www.libnet.sh.cn:82/gate/big5/www.istis.sh.cn/list/list.aspx?id=3216

TinyOS 是 UC Berkeley(加州大學伯克利分校)開發的開放源代碼作業系統,專為嵌入式無線傳感網路設計,作業系統基於構件(component-based)的架構使得快速的更新成為可能,而這又減小了受傳感網路記憶體限制的代碼長度。TinyOS 的構件包括網路協議、分佈式服務、感測器驅動及數據識別工具。其良好的電源管理源於事件驅動執行模型,該模型也允許時序安排具有靈活性。TinyOS 已被應用於多個平臺和感應板中。

TinyOS 相關網站:
http://arts.ecs.umass.edu/~hkumar/presentations.htm
http://sourceforge.net/projects/tinyos/
 
TinyOS 目前的最新版本為TinyOS 2.0 beta2(計劃在今年年底前發佈TinyOS 2.0的非測試版本),該版本新增加的特點為:
支援 mica2、micaZ、Telos revB/TMote Sky、IntelMote2、eyes 和 tinynode平臺;
兩個多次反射協議,Collection 和 Dissemination;
一個更加完善的源仲裁構架(resource arbitration framework),集成週邊電源管理;
mica 和 telos平 臺族的非易失性存儲;
三個總體記憶體管理元件:Pool、Queue 和 Cache;
顯著增強的傳感支援;
mica 族和 IntelMote2 的有效 I2C 棧。
 
目前有多個採用 TinyOS 的研究項目,如 UCLA(加州大學洛杉磯分校)的 Shahin Farshchi 在進行一項以 TinyOS 為基礎的無線神經界面研究。這樣的系統在100Hz/頻道的採樣頻率下可傳感、放大、傳輸神經信號,系統小巧、成本低、重量輕、功率小。系統要求一個接收器接收、解調、顯示傳輸的神經信號。在每秒8bit的採樣率下,系統的速度可達5600。該速度可保證8個 EEG頻道、或1個速度為每秒5.6K採樣頻道的可靠傳輸。研究者目前的奮鬥目標是提高該基於TinyOS的傳感網路的數據傳輸速度,設計與被測對象連接的前端神經放大電路。
http://www.ee.ucla.edu/~judylab/research/projects/Shahin/index.htm

路易斯安娜州立大學和位於 Baton Rouge 的南方大學的 Nian-Feng Tzeng 博士正在研究應用於石油/氣體開發和管理的UcoMS(Ubiquitous Computing and Monitoring System,泛計算和監控系統)。該系統適用於傳感網路、無線通信和網格計算,主要功能包括幫助鑽孔、操作數據記錄和處理、線上平臺資訊發佈和顯示、設備監控/入侵檢測、地震處理、複雜表面設備和管道的管理。也可使用UCoMS監控、維護淘汰的平臺。
http://www.ucoms.org

另外,Freescale正在其Zigbee開發板上測試TinyOS和TinyDB。
波士頓大學的Wei Li將其用於傳感網路的控制和優化:
http://people.bu.edu/wli
 
Brilliant Technology將其用於無線傳感網路進行結構健康監測:
http://www.tBrilliant.com
其他更多應用情況參見:
http://webs.cs.berkeley.edu/users/select_users.php

///%tinyos
//%/
TurnKey 官方網站 : http://www.turnkeylinux.org/

{{item1{認識 TurnKey File Server - Simple Network Attached Storage}}}
An easy to use file server that combines Windows-compatible network file sharing with an advanced web based file manager and includes support for ''SMB'', ''SFTP'' and rsync file transfer protocols. The server is configured to allow server users to manage files in private or public storage. Based on Samba and eXtplorer.

''Default credentials:''

Webmin, Webshell, SSH, Samba: username ''root'', no password
- user sets password during installation

''Web based file manager (eXtplorer):''
- username ''admin'', password ''turnkey''
- username ''guest'', password ''turnkey''
- Local FTP authentication is done using UNIX users

''下載網址 :'' http://www.turnkeylinux.org/fileserver

{{item1{安裝 TurnKey 檔案伺服器}}}

[img[img/turnkey/tfs01.png]]

[img[img/turnkey/tfs02.png]]

[img[img/turnkey/tfs03.png]]

[img[img/turnkey/tfs04.png]]

[img[img/turnkey/tfs05.png]]

<<toBalaNotes "1">>

{{item1{建立 TurnKey 檔案虛擬主機}}}

[img[img/turnkey/tfsvm01.png]]

[img[img/turnkey/tfsvm02.png]]

[img[img/turnkey/tfsvm03.png]]

[img[img/turnkey/tfsvm04.png]]

[img[img/turnkey/tfsvm05.png]]

[img[img/turnkey/tfsvm06.png]]

[img[img/turnkey/tfsvm07.png]]

[img[img/turnkey/tfsvm08.png]]

[img[img/turnkey/tfsvm09.png]]

<<toBalaNotes "2">>

///%1
//%/

///%2
//%/
本文網址 : http://nicaliu.info/2010/07/15/2992/

譯者:Nica < nicaliu at gmail dot com >
.
開放源碼 KVM 虛擬化技術已有相當多廠商開始推動,大部份是拿來當作輔助雲端佈署的機制。這些支援 KVM 的廠商裡最大的就是 Linux 的 Red Hat 與 Ubuntu 了 – 不過它們推的那些解決方案不見得是在雲端佈署時必須使用。
.
主機供應商 The Planet 近期推動新的雲端服務,提供以 KVM 技術建置的服務,但未利用到 Ubuntu Enterprise Cloud (UEC) 或 Red Hat’s Enterprise Virtualization (RHEV)。The Planet 選擇以自已的方式,用的是免費的 Ubuntu Lucid LTS,且不使用 Canonical – Ubuntu 背後的贊助商,所提供的付費諮詢。
.
「在 hypervisor 這層,就我們想訂的價格點而言,商用軟體並非我們的選項」 The Planet 雲端服務產品管理的資深經理 Carl Meadows 對 InternetNews.com 如此解釋道。
.
Meadows 進一步說明,他們的團隊著眼於開放源碼平台的兩個主角 Xen 與 KVM,在檢視這些選項後,得出的結論是:Xen 社群在開發社群這一塊並沒有像 KVM 那樣的凝聚力,基於此,The Planet 的雲端會在 KVM 上作標準化,而 Meadows 也認為至今這個決定仍是對的方向。
.
KVM 本身即為 Linux kernel 的一部份,同時也是雲端佈署的一塊,The Planet 的雲端用的是近期釋出的 Ubuntu Lucid 版本。Ubuntu Lucid 所能提供的重點之一就是 Ubuntu Enterprise Cloud。Ubuntu Enterprise Cloud 建立於 Eucalyptus 開放源碼專案上,作用就是拿來作雲端管理與佈署。
.
Canonical 提供付費的雲端佈署服務,乃是由前任 MySQL CEO Marten Mickos 所領導的商業版 Eucalyptus 。The Planet 最後並未將 Eucalyptus 納入成為其雲端佈署的一部份。
.
「我們評估過 Eucalyptus,但對就我們的需求而言它太過綁手綁腳了」Meadows說道「它其實是仿 Amazon 的 EC2 所設計,而我們並不是要重建一個 EC2。用 Eucalyptus 來建置,作出來就是一個 Eucalyptus 雲端,但我們真正想要的是為客戶設計一個符合他們需求的虛擬化平台。」
.
此外,雖然 The Planet 選擇以 Ubuntu 作 KVM 的 Linux 分支套件,但並未用到 Canonical 的付費合約。
.
「它們典型的付費技術支援對我們而言並沒有實質用處,因為我們擁有 600 至 1000 台主機執行一模一樣的東西,」Meadows 說道「所以當我們遇到 bug 時,相同的 bug 會出現在所有的系統上,而各伺服器有其個別許可權執行技術支援,因此我們的運作架構不適於此法。」
.
Meadows 提醒道:雖然 The Planet 與 Canonical 並沒有技術上的合作關係,但彼此已對可行的商業性合作方案進行討論,只是還沒有適切的結論。
.
「它們想要的只是一個用它們分支套件的雲端廠商與服務供應商」Meadows 說道「There is no money changing hands yet.」
.
此外,The Planet 會提交 bug 至開放源碼的 Ubuntu 專案 – 一般情況下實在用不到太多的技術支援。再依據與 Ubuntu 溝通後的結果,依據雲端佈署架構,執行技術性動作。
.
許多大型雲端成功案例
The Planet 亦曾考慮使用 Red Hat 的 KVM 建置雲端。迄今為止,Red Hat 宣稱已有相當多大型雲端成功案例,還擁有經過認證的雲端供應商程式,包括 IBM、Amazon、NTT 與 Saavis 等都有。
.
Meadows 解釋,經由他試著理解出 Red Hat 雲端基礎架構的建置方式後,發現對 The Planet 而言 Red Hat 的方式並不適用。
.
「Red Hat 要我們在執行 Red Hat KVM 並於取得 Red Hat guest 上線前,使用其特定的許可權,這並非我們負擔得起」Meadows 說道「對我們而言它並不能創造超越 Ubuntu 的價值。」
<<toBalaNotes "1">>



///%1
//%/
{{item1{啟用 VNC 連接}}}

''1. 移除原先 VNC 裝置''

[img[img/kvm/vnc01.png]]

''2. 新增 VNC 裝置''

[img[img/kvm/vnc02.png]]

''3. 設定 VNC Port''

[img[img/kvm/vnc03.png]]

''4. 檢視 VNC 裝置最後設定''

[img[img/kvm/vnc04.png]]

{{item1{連接遠端桌面}}}

''virt-viewer 命令''
{{{
# 本機連接
$ virt-viewer -c qemu+ssh:///system TC88

# 遠端連接
$ virt-viewer -c qemu+ssh://kok@192.168.5.90/system baseserver

}}}

''圖形工具連接''

[img[img/kvm/vDesktop02.png]]

[img[img/kvm/vDesktop03.png]]

[img[img/kvm/vDesktop04.png]]

<<toBalaNotes "1">>

{{item1{SPICE}}}
網址 : http://www.spice-space.org/

The Spice project aims to provide a complete open source solution for interaction with virtualized desktop devices.The Spice project deals with both the virtualized devices and the front-end. Interaction between front-end and back-end is done using VD-Interfaces. The VD-Interfaces (VDI) enable both ends of the solution to be easily utilized by a third-party component. 


///%1
//%/

官方網址 :  http://msdn.microsoft.com/zh-tw/azure/default.aspx

The ''Windows Azure platform'' is a set of cloud computing services that can be used together or independently that enable:

- Developers use existing skills and familiar tools to develop cloud applications
- ISVs and System Integrators rapidly reach market and pay as you go
- IT Managers gain access to a new set of resources without adding complexity
- Businesses of all sizes to quickly respond as business needs change

{{item1{Azure 運作架構}}}

[img[img/xaas/Azure.jpg]]

Microsoft Windows Azure 平台是由 Microsoft 資料中心所承載的一個網際網路規模層級的雲端服務平台。 它提供了 Windows Azure 作業系統、.NET 服務與 SQL Azure。

<<toBalaNotes "azure">>


///%azure
//%/
{{item1{iClass 系統執行與離開}}}

''1. 執行 iClass 管理系統''

$ ./iclass.sh
{{{
====================== iClass 管理系統 (V 0.3) =======================
 超級使用者資訊 (帳號未產生)

 本機資訊
   IP 位址 : 192.168.99.6
   Gateway 位址 : 192.168.99.254
   DNS 位址 : 168.95.1.1
==========================================================================
[1] 批次建立使用者帳號
[2] 批次刪除使用者帳號
[3] 批次更新使用者家目錄 (/home) 資料
[4] 更新使用者登入清單 (Samba)
[5] 學習評量
[6] 建立與刪除超級使用者帳號 (oc9root)
[7] 編輯設定檔 (/root/iclass/conf/oc9.conf)
[8] 離開

輸入代號, 執行所需的功能 : 
}}}

''2. 離開 iClass 管理系統''

在選單中, 輸入 8, 按 Enter 鍵, 會出現以下訊息 :
{{{
確定離開 (y/n) ?
}}}

輸入 "y", 離開 iClass 管理系統

<<toBalaNotes "1">>

{{item1{建立與刪除超級使用者帳號}}}

''1. 執行 iclass.sh''
{{{
$ sudo ./iclass.sh
}}}

''2. 編輯設定檔''

輸入 "7", 按 Enter 鍵, 開始編輯 conf/oc9.conf (內定設定檔), 修改 <CloudAdmin> 標籤內容
{{{
<?xml version="1.0" encoding="utf-8" ?>
<oncloud9>
  <CloudAdmin>
    Title:私有知識雲管理者
    Date:2011-02-31
    SuperUser:雲行者
    LoginName:oc9root
    Password:student
    EMail:oc9root@gmail.com
  </CloudAdmin>

  <!--
    0: 一般目錄
    1: 評量目錄
    2: 系統目錄
    3: 網站
  --> 
  <CloudDir>
    <![CDATA[bok:0]]>
    <![CDATA[app:0]]>
    <![CDATA[exam:1]]>
    <![CDATA[sys:2]]>
    <![CDATA[www:3]]>
  </CloudDir>

  <CloudUser>
    lcj01:student:張三逢:lcj01@gmail.com
    lcj02:student:陳漢點:lcj02@gmail.com
    lcj03:student:鄭如意:lcj03@gmail.com
    lcj04:student:真抱歉:lcj04@gmail.com
  </CloudUser>

  <CloudDB>
    ServerIP:192.168.200.1
    ServerPort:5948
  </CloudDB>

  <CloudTest>
    SCJP1.5
    SCJP1.6
    LPIC
  </CloudTest>

</oncloud9>
}}}

''3. 建立超級使用者''

在下面選單, 輸入 "6", 建立超級使用者
{{{
====================== iClass 管理系統 (V 0.3) =======================
 超級使用者資訊 (帳號未產生)

 本機資訊
   IP 位址 : 192.168.99.6
   Gateway 位址 : 192.168.99.254
   DNS 位址 : 168.95.1.1
==========================================================================
[1] 批次建立使用者帳號
[2] 批次刪除使用者帳號
[3] 批次更新使用者家目錄 (/home) 資料
[4] 更新使用者登入清單 (Samba)
[5] 學習評量
[6] 建立與刪除超級使用者帳號 (oc9root)
[7] 編輯設定檔 (/root/iclass/conf/oc9.conf)
[8] 離開

輸入代號, 執行所需的功能 : 6


是否要建立 oc9root 帳號 (y/n) ? y

oc9root 使用者帳號建立成功
oc9root 加入 admin 群組成功
新增 oc9root (Samba) 使用者帳號成功
./bok 建立成功
./app 建立成功
./exam 建立成功
./sys 建立成功
./www 建立成功
oc9root 豆知識筆記本網站建立成功 (www)
oc9root 評量網站建立成功 (exam)

產生 oc9root 的 oc9info.txt 檔案

按任何鍵, 回到主選單...
}}}

''4. 檢視超級使用者的家目錄''
{{{
# sudo apt-get install tree

# tree /home/oc9root/
/home/oc9root/
├── app
├── bok
├── exam
│  ├── LPIC
│  ├── SCJP1.5
│  └── SCJP1.6
├── sys
│  ├── oc9info.txt
│  └── users
└── www

8 directories, 2 files

}}}

{{item1{新增使用者帳號}}}

''1. 編輯設定檔''

輸入 "7", 按 Enter 鍵, 開始編輯 conf/oc9.conf (內定設定檔), 在 <CloudUser> 標籤中新增使用者 (lcj04) 資訊, 如下 :
{{{
  <CloudUser>
    lcj01:student:張三逢:lcj01@gmail.com
    lcj02:student:陳漢點:lcj02@gmail.com
    lcj03:student:鄭如意:lcj03@gmail.com
    lcj04:student:真抱歉:lcj04@gmail.com
  </CloudUser>
}}}

''2. 開始建立使用者帳號''

輸入 "1", 按 Enter 鍵, 開始批次建立所有使用者帳號
{{{
====================== 行動網站教學系統 (V 0.2) =======================
 超級使用者資訊 
   帳號 : oc9root
   網路磁碟機 : \\192.168.99.6\oc9root
   網址 : http://192.168.99.6/oc9root

 本機資訊
   IP 位址 : 192.168.99.6
   Gateway 位址 : 192.168.99.254
   DNS 位址 : 168.95.1.1
==========================================================================
[1] 批次建立使用者帳號
[2] 批次刪除使用者帳號
[3] 批次更新使用者家目錄 (/home) 資料
[4] 更新使用者登入清單 (Samba)
[5] 學習評量
[6] 建立與刪除超級使用者帳號 (oc9root)
[7] 編輯設定檔 (/root/iclass/conf/oc9.conf)
[8] 離開

輸入代號, 執行所需的功能 : 1


確定要建立全部帳號 (y/n) ? y

/etc/skel/bok 建立成功
/etc/skel/app 建立成功
/etc/skel/exam 建立成功
/etc/skel/sys 建立成功
/etc/skel/www 建立成功

lcj01 使用者帳號建立成功
新增 lcj01 (Samba) 使用者帳號成功
產生 lcj01 的 oc9info.txt 檔案

lcj02 使用者帳號建立成功
新增 lcj02 (Samba) 使用者帳號成功
產生 lcj02 的 oc9info.txt 檔案

lcj03 使用者帳號建立成功
新增 lcj03 (Samba) 使用者帳號成功
產生 lcj03 的 oc9info.txt 檔案

lcj04 使用者帳號建立成功
新增 lcj04 (Samba) 使用者帳號成功
產生 lcj04 的 oc9info.txt 檔案

按任何鍵, 回到主選單... 

}}}

''3. 檢視使用者的家目錄''
{{{
# tree /home
/home
├── lcj01
│  ├── app
│  ├── bok
│  ├── exam
│  ├── sys
│  │  └── oc9info.txt
│  └── www
├── lcj02
│  ├── app
│  ├── bok
│  ├── exam
│  ├── sys
│  │  └── oc9info.txt
│  └── www
├── lcj03
│  ├── app
│  ├── bok
│  ├── exam
│  ├── sys
│  │  └── oc9info.txt
│  └── www
├── lcj04
│  ├── app
│  ├── bok
│  ├── exam
│  ├── sys
│  │  └── oc9info.txt
│  └── www
├── oc9root
│  ├── app
│  ├── bok
│  ├── exam
│  │  ├── LPIC
│  │  ├── SCJP1.5
│  │  └── SCJP1.6
│  ├── sys
│  │  ├── oc9info.txt
│  │  └── users
│  └── www
└── student

34 directories, 6 files

}}}

<<toBalaNotes "2">>
{{item1{批次壓縮及備份個人網站}}}
''參考文章''
1. Example uses of the Linux Command zip
http://linux.about.com/od/commands/a/blcmdl1_zipx.htm
2. 關於來自windows中文zip壓縮檔的問題
http://kawsing.blogspot.com/2009/06/windowszip.html

{{op1{檢視目前登入環境的語系}}}
{{{
# echo $LANG
zh_TW.UTF-8
}}}

{{op1{撰寫程式}}}

此程式必須在 zh_TW.UTF-8 語系中執行, 並且在系統要先安裝 convmv 命令套件. zipwww.sh 會將家目錄中的 www 目錄壓縮, 壓縮後的檔案, 會複製回 www 目錄

''$ nano zipwww.sh''
{{{
#!/bin/bash
# zipwww.sh - for every user under /home that has a www directory, copy
# it to /tmp, convert the file names from UTF-8 to Big5 with convmv (so
# the archive unpacks with readable names on Big5 systems), zip it, and
# place <user>.zip back in that user's home directory.
#
# Must run as root in a zh_TW.UTF-8 locale with convmv installed;
# conversion output is appended to /tmp/convmv.msg.

[ "$USER" != "root" ] && echo "要 root 權限" && exit 1
[ "$LANG" != "zh_TW.UTF-8" ] && echo "語系不對, 必須是 zh_TW.UTF-8" && exit 1
# Test the command's exit status directly instead of inspecting $? afterwards.
if ! which convmv &>/dev/null; then
   echo "請安裝 convmv (apt-get install convmv)"
   exit 1
fi

echo "" >/tmp/convmv.msg

# Iterate over /home entries with a glob instead of parsing `ls`, and
# quote every expansion so user names containing spaces stay intact.
for dir in /home/*/
do
   z=$(basename "$dir")

   [ -d /tmp/www ] && rm -r /tmp/www &>/dev/null

   if [ -d "/home/$z/www" ]; then
      cp -rP "/home/$z/www" /tmp/www &>/dev/null

      # --notest makes convmv actually rename (default is a dry run).
      if ! convmv -r --notest -f utf-8 -t big5 /tmp/www &>>/tmp/convmv.msg; then
         echo "轉碼失敗"
         exit 1
      fi

      cd /tmp/ &>/dev/null
      if ! zip -r "$z" www &>/dev/null; then
         echo "$z.zip 壓縮失敗"
         exit 1
      fi

      cp "$z.zip" "/home/$z/"
      # Report success only if the ownership hand-off worked.
      chown "$z:$z" "/home/$z/$z.zip" && echo "$z.zip 產生成功"
   fi
done
}}}

<<toBalaNotes "3">>


///%1
//%/

///%2
//%/

///%3
//%/

''問題 1 :'' 在 Ubuntu 10.10 版本, 安裝 KVM 後, 可使用 kvm 去安裝 Ubuntu Server 10.04.1, 但無法啟動它 ?

''解答 1 :'' 目前還沒解決 (2010/11/14) , 說明如下 :
{{{
Binary package hint: qemu-kvm After upgrade from 10.04 -> 10.10 all the libvirt domains did not boot and the cpu usage went 100%. Trying to isolate the problem I found that kvm on maverick is unable to boot ubuntu 10.04 i386 virtual minimal setup. Other guests versions booted just fine. I even moved the image to another computer with also maverick edition and tried to boot the images there with no success. I then tried to make a new guest installation using 10.04.1 i386 server iso with 'minimal virtual' option ON. The setup went just fine but trying to boot the fresh install ended on the same thing. Trying to do the same with a debian guest worked with no problems. The actual effect is that KVM opens the disk images grub is executed and after that black screen (on guest) and 100% cpu usage on host. Notice: all hosts and guests are i386. 
}}}
網址 : https://bugs.launchpad.net/ubuntu/+source/qemu-kvm/+bug/666180/+activity

http://ubuntuforums.org/archive/index.php/t-1567147.html

''問題 2 : 虛擬機器管理員啟動後, 顯示以下錯誤資訊''
{{{
Unable to open a connection to the libvirt management daemon.

Libvirt URI is: qemu:///system

Verify that:
 - The 'libvirtd' daemon has been started
}}}

''解答 2 : 將新建帳號加入 sudo  及 libvirtd 群組''
{{{
$ sudo usermod -a -G libvirtd kvm01
$ sudo usermod -a -G sudo kvm01
}}}

''問題 3 : Disable virbr0 NAT Interface''
The virtual network (virbr0) used for Network address translation (NAT) which allows guests to access to network services. However, NAT slows down things and only recommended for desktop installations. 

''解答 3 : 解決方法如下''

1. Display Current Setup

{{{
# ifconfig

virbr0    Link encap:Ethernet  HWaddr 00:00:00:00:00:00
          inet addr:192.168.122.1  Bcast:192.168.122.255  Mask:255.255.255.0
          inet6 addr: fe80::200:ff:fe00:0/64 Scope:Link
          UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
          RX packets:0 errors:0 dropped:0 overruns:0 frame:0
          TX packets:39 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:0
          RX bytes:0 (0.0 b)  TX bytes:7921 (7.7 KiB)
}}}

Or use the following command:
{{{
# virsh net-list

Name                 State      Autostart
-----------------------------------------
default              active     yes
}}}

To disable virbr0, enter:
{{{
# virsh net-destroy default
# virsh net-undefine default
# service libvirtd restart
}}}

''問題 4 : Libvirt connection does not have interface support ''
這訊息出現在 virt-manager 管理工具中

''解答 4 : ''
This means either your local libvirtd (and/or remote libvirtd if connecting to a remote machine) are older than 0.7.0 when interface management was introduced, or they aren't compiled with interface support.

Your distro may not have compiled libvirt support because netcf, the underlying library, only supports redhat style /etc/sysconfig network scripts at the moment.

<<toBalaNotes "1">>

///%1
//%/
<<forEachTiddler 
 where
   'tiddler.tags.contains("toBala")'
>>
''參考文章''
1. 用 VirtualBox 安裝 Android-x86 4.0
http://android.cool3c.com/article/54626

{{item1{下載安裝光碟 (請選擇 eeepc 版本)}}}

下載網址 : https://sites.google.com/a/android-x86.org/web/releases/releasenote-4-0-rc2

{{item1{建立 Android X86 虛擬電腦}}}

[img[img/kvm/ax01.png]]

[img[img/kvm/ax02.png]]

[img[img/kvm/ax03.png]]

[img[img/kvm/ax04.png]]

{{item1{開始安裝 Android X86 系統}}}

[img[img/kvm/ax07.png]]

[img[img/kvm/ax08.png]]

[img[img/kvm/ax09.png]]

[img[img/kvm/ax10.png]]

[img[img/kvm/ax11.png]]

[img[img/kvm/ax12.png]]

[img[img/kvm/ax13.png]]

[img[img/kvm/ax14.png]]

[img[img/kvm/ax15.png]]

[img[img/kvm/ax16.png]]

[img[img/kvm/ax17.png]]
<<toBalaNotes "1">>
{{item1{Android x86 虛擬電腦週邊裝置設定}}}
''1. 虛擬網卡需選擇 pcnet''

[img[img/android/AX109VM01.png]]

''2. 顯示系統需選擇 vga''

[img[img/android/AX109VM02.png]]

///%1
//%/
''參考文章''
1. Installing_on_Ubuntu
http://wiki.apache.org/couchdb/Installing_on_Ubuntu

{{item1{開始安裝 CouchDB}}}

{{op1{1. 安裝 CouchDB 所需的相依套件 (1)}}}
{{{
# sudo apt-get build-dep couchdb
正在讀取套件清單... 完成
正在重建相依關係          
正在讀取狀態資料... 完成
下列【新】套件將會被安裝:
  cdbs comerr-dev erlang-dev fdupes help2man krb5-multidev
  libcurl4-openssl-dev libdbus-1-dev libdbus-glib-1-dev libgssrpc4 libicu-dev
  libidn11-dev libiw-dev libkadm5clnt-mit7 libkadm5srv-mit7 libkdb5-4
  libkrb5-dev libldap2-dev libncurses5-dev libnotify-dev libnspr4-dev
  libnss3-dev libreadline-dev libreadline6-dev libssl-dev xulrunner-1.9.2-dev
  xulrunner-dev
升級 0 個,新安裝 27 個,移除 0 個,有 18 個未被升級。
需要下載 16.5MB 的套件檔。
此操作完成之後,會多佔用 71.1MB 的磁碟空間。
是否繼續進行 [Y/n]?
}}}

''[註]'' 
Turnkey 虛擬電腦, 需修改 /etc/apt/sources.list.d/sources.list 檔案內容,  內容如下 :
{{{
deb http://archive.turnkeylinux.org/ubuntu lucid main

deb http://tw.archive.ubuntu.com/ubuntu lucid main
deb-src http://tw.archive.ubuntu.com/ubuntu lucid main                   # 增加此行
deb http://tw.archive.ubuntu.com/ubuntu lucid universe
deb-src http://tw.archive.ubuntu.com/ubuntu lucid universe               # 增加此行
# deb http://tw.archive.ubuntu.com/ubuntu lucid restricted
# deb http://tw.archive.ubuntu.com/ubuntu lucid multiverse
                                                   :
}}}

{{op1{2. 安裝 CouchDB 所需的相依套件 (2)}}}
{{{
# sudo apt-get install libicu-dev libcurl4-gnutls-dev libtool 
正在讀取套件清單... 完成
正在重建相依關係          
正在讀取狀態資料... 完成
libicu-dev 已經是最新版本了。
以下套件是被自動安裝進來的,且已不再會被用到了:
  libssl-dev
使用 'apt-get autoremove' 來將其移除。
下列的額外套件將被安裝:
  libltdl-dev
建議套件:
  libcurl3-dbg libtool-doc gfortran fortran95-compiler gcj
下列套件將會被【移除】:
  libcurl4-openssl-dev
下列【新】套件將會被安裝:
  libcurl4-gnutls-dev libltdl-dev libtool
升級 0 個,新安裝 3 個,移除 1 個,有 18 個未被升級。
需要下載 1,679kB 的套件檔。
此操作完成之後,會多佔用 2,306kB 的磁碟空間。
是否繼續進行 [Y/n]?
}}}

{{op1{3. 下載 CouchDB 安裝檔}}}
{{{
# wget http://ftp.stut.edu.tw/var/ftp/pub/OpenSource/apache//couchdb/1.0.2/apache-couchdb-1.0.2.tar.gz
--2011-02-23 03:41:50--  http://ftp.stut.edu.tw/var/ftp/pub/OpenSource/apache//couchdb/1.0.2/apache-couchdb-1.0.2.tar.gz
Resolving ftp.stut.edu.tw... 163.26.222.172
Connecting to ftp.stut.edu.tw|163.26.222.172|:80... connected.
HTTP request sent, awaiting response... 200 OK
Length: 1031474 (1007K) [application/x-gzip]
Saving to: `apache-couchdb-1.0.2.tar.gz'

100%[======================================>] 1,031,474    177K/s   in 12s     

2011-02-23 03:42:03 (87.5 KB/s) - `apache-couchdb-1.0.2.tar.gz' saved [1031474/1031474]
}}}

{{op1{4. 解壓縮 CouchDB 安裝檔 }}}
{{{
# tar xvfz apache-couchdb-1.0.2.tar.gz 
}}}

{{op1{5. 編譯與安裝}}}
{{{
# xulrunner -v
Mozilla XULRunner 1.9.2.13 - 20101206141910

# cd apache-couchdb-1.0.2/
# ./configure --prefix= --with-js-lib=/usr/lib/xulrunner-devel-1.9.2.13/lib --with-js-include=/usr/lib/xulrunner-devel-1.9.2.13/include
# make && make install
}}}

{{op1{6. 執行設定}}}
{{{
# useradd -d /var/lib/couchdb couchdb
# chown -R couchdb: /var/lib/couchdb /var/log/couchdb

# chown -R root:couchdb /etc/couchdb
# chmod 664 /etc/couchdb/*.ini
# chmod 775 /etc/couchdb/*.d
}}}

{{op1{7. 啟動 CouchDB}}}
{{{
# /etc/init.d/couchdb start
 * Starting database server couchdb                                      [ OK ] 
}}}

{{op1{8. 測試 CouchDB}}}
{{{
# curl http://127.0.0.1:5984/
程式 'curl' 目前尚未安裝。  您可以由輸入以下內容安裝:
apt-get install curl

# sudo apt-get install curl
正在讀取套件清單... 完成
正在重建相依關係          
正在讀取狀態資料... 完成
下列【新】套件將會被安裝:
  curl
升級 0 個,新安裝 1 個,移除 0 個,有 18 個未被升級。
需要下載 208kB 的套件檔。
此操作完成之後,會多佔用 328kB 的磁碟空間。
下載:1 http://tw.archive.ubuntu.com/ubuntu/ lucid/main curl 7.19.7-1ubuntu1 [208kB]
取得 208kB 用了 0s (2,238kB/s)
選取了原先未被選取的套件 curl。
(正在讀取資料庫 ... 系統目前共安裝了 142408 個檔案和目錄。)
正在解開 curl (從 .../curl_7.19.7-1ubuntu1_i386.deb)...
正在進行 man-db 的觸發程式 ...
正在設定 curl (7.19.7-1ubuntu1) ...

# curl http://127.0.0.1:5984/
{"couchdb":"Welcome","version":"1.0.2"}
}}}

<<toBalaNotes "installcdb">>

///%installcdb
//%/
''參考文章''
1. TiddlyWiki on Node.js 
http://tiddlywiki.com/static/TiddlyWiki%2520on%2520Node.js.html

{{{
student@UB14DOS2:~/dos$ sudo npm install -g tiddlywiki
npm http GET https://registry.npmjs.org/tiddlywiki
npm http 200 https://registry.npmjs.org/tiddlywiki
npm http GET https://registry.npmjs.org/tiddlywiki/-/tiddlywiki-5.0.12-beta.tgz
npm http 200 https://registry.npmjs.org/tiddlywiki/-/tiddlywiki-5.0.12-beta.tgz
/usr/bin/tiddlywiki -> /usr/lib/node_modules/tiddlywiki/tiddlywiki.js
tiddlywiki@5.0.12-beta /usr/lib/node_modules/tiddlywiki
}}}

{{{
student@UB14DOS2:~/dos$ tiddlywiki mynewwiki --init server
Copied edition 'server' to mynewwiki
}}}

{{{
student@UB14DOS2:~/dos$ tiddlywiki mynewwiki --server
Serving on 127.0.0.1:8080
(press ctrl-C to exit)
}}}

''參考文章''
1. RHEL5 - virt-install 指令安裝 Guest 的方法
http://benjr.tw/node/366
2. Installing KVM Guests With virt-install On Ubuntu 12.04 LTS Server.
http://www.webhostingskills.com/articles/installing_kvm_guests_with_virt_install_on_ubuntu_12.04_lts_server

{{item1{安裝 virt-install 及 virt-viewer 套件 (可多人同時操作)}}}
{{{
student@US1204:~$  sudo apt-get install virtinst virt-viewer
正在讀取套件清單... 完成
正在重建相依關係
正在讀取狀態資料... 完成
下列的額外套件將被安裝:
  python-libvirt python-urlgrabber
下列【新】套件將會被安裝:
  python-libvirt python-urlgrabber virt-viewer virtinst
升級 0 個,新安裝 4 個,移除 0 個,有 6 個未被升級。
需要下載 456 kB 的套件檔。
此操作完成之後,會多佔用 3,259 kB 的磁碟空間。
是否繼續進行 [Y/n]?
                           :
選取了原先未選的套件 python-libvirt。
(正在讀取資料庫 ... 目前共安裝了 175143 個檔案和目錄。)
正在解開 python-libvirt (從 .../python-libvirt_0.9.8-2ubuntu17.16_amd64.deb)...
選取了原先未選的套件 python-urlgrabber。
正在解開 python-urlgrabber (從 .../python-urlgrabber_3.9.1-4ubuntu2_all.deb)...
選取了原先未選的套件 virt-viewer。
正在解開 virt-viewer (從 .../virt-viewer_0.4.2-1_amd64.deb)...
選取了原先未選的套件 virtinst。
正在解開 virtinst (從 .../virtinst_0.600.1-1ubuntu3.3_all.deb)...
正在進行 man-db 的觸發程式 ...
正在設定 python-libvirt (0.9.8-2ubuntu17.16) ...
正在設定 python-urlgrabber (3.9.1-4ubuntu2) ...
正在設定 virt-viewer (0.4.2-1) ...
正在設定 virtinst (0.600.1-1ubuntu3.3) ...
}}}

{{item1{將 student 帳號加入 kvm 群組}}}
{{{
$ cat /etc/group | grep kvm
kvm:x:126:

$ sudo adduser student kvm
[sudo] password for student:
正將 `student' 使用者新增至 `kvm' 群組 ...
正在將使用者“student”加入到“kvm”群組中
完成。

$ cat /etc/group | grep kvm
kvm:x:126:student
}}}

{{item1{由光碟建立 Vyatta 虛擬電腦}}}

''1. 下載 Vyatta 安裝光碟''
{{{
student@UBDOS1:~$ wget http://192.168.77.1/vyatta-livecd_VC6.6R1_amd64.iso
--2014-07-26 10:48:09--  http://192.168.77.1/vyatta-livecd_VC6.6R1_amd64.iso
正在連接 192.168.77.1:80... 連上了。
已送出 HTTP 要求,正在等候回應... 200 OK
長度: 233832448 (223M) [application/octet-stream]
Saving to: `vyatta-livecd_VC6.6R1_amd64.iso'

100%[======================================>] 233,832,448 4.79M/s   in 45s     

2014-07-26 10:48:54 (5.00 MB/s) - `vyatta-livecd_VC6.6R1_amd64.iso' saved [233832448/233832448]
}}}
 
''2. 產生虛擬硬碟''
{{{
$ kvm-img create -f qcow2 vyatta.qcow2 1G
Formatting 'vyatta.qcow2', fmt=qcow2 size=1073741824 encryption=off cluster_size=65536
}}}

''3. 開始安裝''
{{{
$ virt-install -r 512 -n vyatta --disk path=./vyatta.qcow2,format=qcow2 --noreboot --cdrom vyatta-livecd_VC6.6R1_amd64.iso
}}}

''4. 登入 Vyatta 系統畫面, 輸入 vyatta/vyatta''
{{{
Welcome to Vyatta - vyatta ttyS0
vyatta login:
}}}

登入成功畫面, 如下 :

[img[img/vyatta/vyatta6601.png]]

''5. 輸入 install system 命令, 開始將 Vyatta 系統安裝至硬碟''

''$ install system''
The wizard opens and you will be guided through the installation process:
Would you like to continue? (Yes/No) [YES]: ''Enter''
Partition (Auto/Union/Parted/Skip) [Auto]: ''Enter''
Install the image on? [sda]: ''Enter''
This will destroy all data on /dev/sda.
Continue? (Yes/No) [No]: ''Yes''
How big of root partition should I create? (1000MB – 1074MB) [1074]MB: ''Enter''
I found the following configuration files
/opt/vyatta/etc/config/config.boot

Which one should I copy to sda? [/opt/vyatta/etc/config/config.boot] ''Enter''
Enter password for administrator account
Enter vyatta password: ''student''
Retype vyatta password: ''student''
Which drive should GRUB modify the boot partition on? [sda]: ''Enter''
Done!

''6. Vyatta 虛擬主機關機''
{{{
vyatta@vyatta:~$ sudo shutdown -h now
}}}

''7. 檢視建立虛擬電腦''
{{{
$ virsh list --all
 Id 名稱               狀態
----------------------------------
  - vyatta               關機
}}}

''[重要]'' virt-install 建立的虛擬電腦設定檔, 存在使用者的家目錄 (/home/student), 而不是存在 Libvirt 管理平台的目錄 (/etc/libvirt/)

''8. 透過 virt-install 所建立的 vyatta 虛擬電腦, 其設定檔會建立在自己的家目錄 (.libvirt)''
{{{
$ tree -up .libvirt/
.libvirt/
└── [drwxrwxr-x student ]  qemu
    ├── [drwxrwxr-x student ]  cache
    ├── [drwxrwxr-x student ]  dump
    ├── [drwxrwxr-x student ]  lib
    ├── [drwxrwxr-x student ]  log
    │ └── [-rw------- student ]  vyatta.log
    ├── [drwxrwxr-x student ]  run
    ├── [drwxrwxr-x student ]  save
    ├── [drwxrwxr-x student ]  snapshot
    └── [-rw------- student ]  vyatta.xml

8 directories, 2 files
}}}
<<toBalaNotes "1">>
{{item1{管理  vyatta 虛擬電腦}}}
''1. 啟動 vyatta 虛擬電腦''
{{{
$ virsh --connect qemu:///session start vyatta
區域 vyatta 已開啟
}}}

''[重要]'' 上面命令並不會改變 家目錄中 vyatta.qcow2 硬碟檔權限, 這樣的運作方式, 允許多人可同時啟動各自的虛擬主機 @@color:red;(實測過)@@

''2. 進入 virsh 虛擬化的互動模式終端機''
{{{
$ virsh --connect qemu:///session 
歡迎使用 virsh - 虛擬化的互動模式終端機。

類型:  「help」以取得指令的求助畫面
        「quit」離開

virsh # list
 Id 名稱               狀態
----------------------------------
  1 vyatta               執行中

virsh # quit
}}}

''3. 登入 vyatta 虛擬電腦''
{{{
$ virsh --connect qemu:///session console vyatta
Connected to domain vyatta
Escape character is ^]

Welcome to Vyatta - vyatta ttyS0

vyatta login: vyatta
Password: 
Linux vyatta 3.3.8-1-586-vyatta-virt #1 SMP Wed Mar 13 10:54:37 PDT 2013 i686
Welcome to Vyatta.
This system is open-source software. The exact distribution terms for 
each module comprising the full system are described in the individual 
files in /usr/share/doc/*/copyright.
vyatta@vyatta:~$ 
}}}

''[重要]'' 按 Ctrl + ] 複合鍵, 離開 vyatta 虛擬電腦

''4. 關閉 vyatta 虛擬電腦''
{{{
$ virsh --connect qemu:///session shutdown vyatta
區域 vyatta 正在執行關機

$ virsh --connect qemu:///session list
 Id 名稱               狀態
----------------------------------
}}}

<<toBalaNotes "2">>
{{item1{直接匯入硬碟檔來建立虛擬電腦}}}
另一個使用者登入後一樣可以執行以下命令

''1. 複製硬碟''
{{{
$ cp /home/student/vyatta.qcow2  vyatta01.qcow2 
}}}

''2. 硬碟匯入''
{{{
$ virt-install --name vyatta01 --ram 1024 --disk path=~/vyatta01.qcow2,format=qcow2 --import

Starting install...
Creating domain...                                       |    0 B     00:01     
Domain creation completed. You can restart your domain by running:
  virsh --connect qemu:///session start vyatta01
}}}

''3. 關機''
{{{
$ sudo shutdown -h now
}}}

''4. 透過 virt-install 所建立的 vyatta 虛擬電腦(vyatta01.xml), 其設定檔會建立在自己的家目錄 (.libvirt)''
{{{
$ tree -up .libvirt/
.libvirt/
└── [drwxrwxr-x student ]  qemu
    ├── [drwxrwxr-x student ]  cache
    ├── [drwxrwxr-x student ]  dump
    ├── [drwxrwxr-x student ]  lib
    ├── [drwxrwxr-x student ]  log
    │ ├── [-rw------- student ]  vyatta01.log
    │ └── [-rw------- student ]  vyatta.log
    ├── [drwxrwxr-x student ]  run
    ├── [drwxrwxr-x student ]  save
    ├── [drwxrwxr-x student ]  snapshot
    ├── [-rw------- student ]  vyatta01.xml
    └── [-rw------- student ]  vyatta.xml

8 directories, 4 files
}}}

{{item1{直接網路安裝 - Ubuntu Server 12.04 (視網路速度, 決定安裝時間)}}}

''1. 產生虛擬硬碟''
{{{
$ kvm-img create -f qcow2 us1204.qcow2 2G
}}}

''2. 開始安裝''
{{{
$ virt-install -n us1204 -r 1024 --vcpus=1 --disk path=~/us1204.qcow2,format=qcow2 --os-type linux --os-variant ubuntuprecise --location http://tw.archive.ubuntu.com/ubuntu/dists/precise/main/installer-i386/ --hvm --nographics --extra-args="auto text console=tty1 console=ttyS0,115200"
}}}

''Advanced topic: Installing over the network''
{{{
Instead of downloading the ISO, you can install from public repositories over HTTP. To do this, remove the --cdrom option and instead specify the -l URL option. Some common locations that virt-install knows how to handle:

Fedora: -l http://download.fedoraproject.org/pub/fedora/linux/releases/13/Fedora/i386/os/ Change "13" to the Fedora release, and "i386" to x86_64 for a 64 bit guest.
Debian: -l http://ftp.us.debian.org/debian/dists/stable/main/installer-i386/ Change "us" to your country code (for faster access to a local mirror), and "i386" to amd64 for a 64 bit guest.
Ubuntu: -l http://ftp.ubuntu.com/ubuntu/dists/maverick/main/installer-i386/ Change "maverick" to the name of the version of Ubuntu to install, and "i386" to amd64 for a 64 bit guest.
}}}
<<toBalaNotes "2">>
''virt-install 與 vmbuilder 的差異''
{{{
Unlike virt-manager, virt-install is a command line tool that allows you to create KVM guests on a headless server. You may ask yourself: "But I can use vmbuilder to do this, why do I need virt-install?" The difference between virt-install and vmbuilder is that vmbuilder is for creating Ubuntu-based guests, whereas virt-install lets you install all kinds of operating systems (e.g. Linux, Windows, Solaris, FreeBSD, OpenBSD) and distributions in a guest, just like virt-manager. This article shows how you can use it on an Ubuntu 12.10 KVM server.
}}}

///%1
//%/

///%2
//%/
''參考文章''
1. 虛擬主機 XML 設定檔說明
http://libvirt.org/format.html
2. 如何變更遠端桌面聽候的連接埠 (Windows XP/2003)
http://support.microsoft.com/kb/306759

{{item1{匯出虛擬電腦設定檔}}}

''1. 檢視目前存在的虛擬電腦''
{{{
$ virsh list --all
 Id 名稱               狀態
----------------------------------
  - UD104SJSD            關機
  - US1041-NS             關機
  - US104-100-RD         關機
  - US104-192-168-100-NAT 關機
  - US104-192-168-100-NS 關機
}}}

''2. 匯出已存在虛擬電腦設定檔''
{{{
$ virsh dumpxml US1041-NS > US1041-NS.xml
}}}

@@color:red;''[注意]''@@ 執行 ''virsh dumpxml'' 命令時, 要確定虛擬主機是 ''關機'', 否則匯出的 XML 資訊中, 會在 <interface> 標籤中, 多出一個 <target> 標籤, 如下例 :
{{{
<interface type='bridge'>
    <mac address='52:54:00:59:af:5b'/>
    <source bridge='br0'/>
    <target dev='vnet0'/>                (多餘的標籤)
    <model type='virtio'/>
</interface>
}}}

''3. 檢視虛擬電腦設定檔''
{{{
# cat US1041-NS.xml 
<domain type='kvm'>
  <name>US1041-NS</name>
  <uuid>7294ab00-c6d0-a434-1de6-1001169bab00</uuid>
  <memory>262144</memory>
  <currentMemory>262144</currentMemory>
  <vcpu>1</vcpu>
  <os>
    <type arch='i686' machine='pc-0.12'>hvm</type>
    <boot dev='hd'/>
  </os>
  <features>
    <acpi/>
    <apic/>
    <pae/>
  </features>
  <clock offset='utc'/>
  <on_poweroff>destroy</on_poweroff>
  <on_reboot>restart</on_reboot>
  <on_crash>restart</on_crash>
  <devices>
    <emulator>/usr/bin/kvm</emulator>
    <disk type='file' device='disk'>
      <driver name='qemu'/>
      <source file='/root/KVMLab/US1041_NS.img'/>
      <target dev='vda' bus='virtio'/>
    </disk>
    <interface type='network'>
      <mac address='52:54:00:54:da:8c'/>
      <source network='default'/>
      <model type='virtio'/>
    </interface>
    <console type='pty'>
      <target port='0'/>
    </console>
    <input type='mouse' bus='ps2'/>
    <graphics type='vnc' port='-1' autoport='yes' listen='127.0.0.1' keymap='en-us'/>
    <video>
      <model type='cirrus' vram='9216' heads='1'/>
    </video>
  </devices>
</domain>
}}}

<<toBalaNotes "1">>
{{item1{快速建立虛擬主機}}}

''1. 複製新增虛擬主機所需的檔案''
{{{
$ cd KVMLab
$ cp  newvm.xml  US1041-NS01.xml
$ cp US1041_NS.img  US1041_NS01.img
}}}

''2. 修改 US1041-NS01.xml  虛擬主機設定檔''
{{{
$ sudo nano US1041-NS01.xml 
<domain type='kvm'>
  <name>US1041-NS01</name>                              # 重設虛擬主機名稱, 要確定具有唯一性
  <uuid>7294ab00-c6d0-a434-1de6-1001169bab00</uuid>                # 刪除這行, 建立虛擬主機時, 會重新產生
  <memory>262144</memory>
  <currentMemory>262144</currentMemory>
  <vcpu>1</vcpu>
                             :
                             :
  <devices>
    <emulator>/usr/bin/kvm</emulator>
    <disk type='file' device='disk'>
      <driver name='qemu'/>
      <source file='/root/KVMLab/US1041_NS01.img'/>                  #  給予正確的 image 檔完整目錄
      <target dev='vda' bus='virtio'/>
    </disk>
    <interface type='network'>
      <mac address='52:54:00:11:22:33'/>                           # 修改 MAC 位址, 要確定具有唯一性
      <source network='default'/>                             # 虛擬網路名稱要一致 (注意大小寫)
      <model type='virtio'/>
    </interface>
    <console type='pty'>
      <target port='0'/>
    </console>
    <input type='mouse' bus='ps2'/>
    <graphics type='vnc' port='-1' autoport='yes' listen='127.0.0.1' keymap='en-us'/>
    <video>
      <model type='cirrus' vram='9216' heads='1'/>
    </video>
  </devices>
</domain>
}}}

''3. 由虛擬主機設定檔, 產生虛擬主機''
{{{
# virsh define US1041-NS01.xml 
區域 US1041-NS01 定義自 US1041-NS01.xml
}}}

''4. 啟動新增虛擬主機''
{{{
# virsh list --all
 Id 名稱               狀態
----------------------------------
  - US1041-NS01            關機
  - US104_SRV            關機
  - W2K3-192-168-100     關機

# virsh start US1041-NS01
區域 US1041-NS01 已開啟

# virsh list --all
 Id 名稱               狀態
----------------------------------
  1 US1041-NS01     執行中
  - US104_NAT            關機
  - US104_SRV            關機
}}}

''5. 虛擬主機關機''
{{{
root@KVMFS:~# virsh shutdown DN2
區域 DN2 正在執行關機
}}}

{{item1{快速移除虛擬電腦}}}

{{{
$ virsh start DN2
區域 DN2 已開啟

$ virsh destroy DN2                (destroy 是強迫關機)
區域 DN2 已經刪除

$ virsh undefine DN2               (undefine 是移除虛擬電腦)
區域 DN2 已經取消定義
}}}

<<toBalaNotes "2">>
{{item1{更改虛擬電腦名稱}}}
在 Virtual Machine Manager (0.8.4) 工具中, 無法更改虛擬電腦名稱, 從 ''0.8.7 這版本'' 可直接修改虛擬電腦名稱

''1. 匯出虛擬電腦 XML 設定檔''
下式命令中的 US104 (注意大小寫) 是虛擬電腦的名稱
{{{
$ sudo virsh dumpxml US104
<domain type='kvm'>
  <name>US104</name>
  <uuid>97eb0f7d-6d78-d131-5449-91687d2199e5</uuid>
  <memory>524288</memory>
  <currentMemory>524288</currentMemory>
  <vcpu>1</vcpu>
               :
</domain>
$ sudo virsh dumpxml US104 > rename.xml
}}}

''2. 修改先前匯出定義檔''
{{{
$ sudo nano rename.xml

<domain type='kvm'>
  <name>US104NAT_0_254</name>                                # 重設虛擬主機名稱, 要確定具有唯一性
  <uuid>97eb0f7d-6d78-d131-5449-91687d2199e5</uuid>
  <memory>524288</memory>
  <currentMemory>524288</currentMemory>
  <vcpu>1</vcpu>
                              :
</domain>
}}}

''3. 重新定義虛擬主機''
因 UUID 是一樣, 所以會改變原先設定
{{{
$ sudo virsh define rename.xml
區域 US104NAT_0_254 定義自 rename.xml
}}}

''4. 檢視執行結果''

[img[img/kvm/VMRename.png]]

<<toBalaNotes "3">>



///%1
//%/

///%2
//%/

///%3
//%/

///%4
//%/
<<timeline>>

{{{
在 [學習筆記本] 的文章中可直接寫入 HTML 網頁, 本文範例使用 &lt;embed&gt; 顯示外部網站中的 Flash 檔, 內容如下

<html>
<body>  
<div class='widget-content'>
<embed quality="high" align="right" type="application/octet-stream" height="100%" src="http://imgfree.21cn.com/free/flash/17.swf" style="LEFT: 0px; WIDTH: 100%;  HEIGHT: 100%" width="100%" wmode="transparent"></embed>
</div>
</body>
</html>
}}}
''參考文章''
1. A Look At Mobile Web Design – 5 Sites To Visit From Your Android Phone
http://androidandme.com/2009/06/news/a-look-at-mobile-web-design-5-sites-to-visit-from-your-android-phone/
2. Mobile META Tags
http://learnthemobileweb.com/tag/handheldfriendly/

{{item1{建置網站目錄架構}}}
{{{
$ tree www
www
├── T2
│ └── index.html
}}}

{{item1{首頁設計}}}
{{{
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE html PUBLIC "-//WAPFORUM//DTD XHTML Mobile 1.2//EN" "http://www.openmobilealliance.org/tech/DTD/xhtml-mobile12.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>on cloud 9</title>
<meta name="viewport" content="width=device-width, initial-scale=1.5, maximum-scale=2.0, minimum-scale=1.0, user-scalable=yes"/>
<meta name="HandheldFriendly" content="True"/>
</head>

<body>
<a href="/cgi-bin/uptime.cgi">1. System Information</a>
</body>
</html>
}}}
We have used the ''viewport meta'' key to help improve the presentation of content displayed in Mobile Safari. Initial-scale is the scale that the content is viewed at when the user first visits the page, while maximum-scale and minimum-scale set the limits on how far the user can zoom in/out on the content. In our code above, initial-scale is 1.5, so the page first appears slightly zoomed in, and the user may zoom between scales 1.0 and 2.0. There is also user-scalable, which determines whether the user can zoom in and out on content at all; here it is set to yes, so zooming is allowed.

''The HandheldFriendly META Tag''
The HandheldFriendly META tag was originally supported by AvantGo mobile browsers in Palm devices to identify mobile-optimized markup. Today, it is widely interpreted by mobile browsers and spiders as an indicator of mobile markup and a directive to display the web document without scaling. The value of the META tag is “true” for mobile markup and “false” for desktop-optimized HTML.

''The Viewport META Tag''
Many smartphone browsers scale Web pages to a wide viewport width, one appropriate for displaying desktop-optimized markup. These browsers allow the user to zoom in and out of scaled Web pages. For example, Opera Mobile uses a default viewport width of 850 pixels, and the iPhone uses a default width of 980 pixels.

The Viewport META tag controls the logical dimensions and scaling of the browser viewport window in many smartphone browsers, including Safari Mobile for the iPhone, Android browser, webOS browser in Palm Pre and Pixi devices, Opera Mini, Opera Mobile and BlackBerry browsers. The presence of the Viewport META tag indicates that the markup document is optimized for mobile devices.

Here is a simplified version of the Viewport tag that sets the browser viewport width at 240 pixels and disables user scaling of the content:
{{{
<meta name="viewport" content="width=240,user-scalable=no" />
}}}
The content value of the Viewport META tag is a comma-delimited list of directives and
their values.

This example <meta> tag lists all Viewport directives and example values:
{{{
<meta name="viewport" content="width=240, height=320, user-scalable=yes,
initial-scale=2.5, maximum-scale=5.0, minimum-scale=1.0" />
}}}

''Viewport META 說明''
|Viewport META directive|Example Value|Description|
|width|width=320, width=device-width|Logical width of the viewport, in pixels. The special device-width value indicates that the viewport width should be the screen width of the device.|
|height|height=480,height=device-height|Logical height of the viewport, in pixels. The special device-height value indicates that the viewport height should be the screen height of the device.|
|user-scalable|user-scalable=no|Specifies whether the user can zoom in and out of the viewport, scaling the view of a Web page. Possible values are yes or no.|
|initial-scale|initial-scale=2.0 | Sets the initial scaling or zoom factor (or multiplier) used for viewing a Web page. A value of 1.0 displays an unscaled Web document.|
|maximum-scale|maximum-scale=2.5|Sets the user’s maximum limit for scaling or zooming a Web page. Values are numeric and can range from 0.25 to 10.0. The value of this directive is a scaling factor or multiplier applied to the viewport contents.|
|minimum-scale|minimum-scale=0.5|Sets the user’s minimum limit for scaling or zooming a Web page. Values are numeric and can range from 0.25 to 10.0. The value of this directive is a scaling factor or multiplier applied to the viewport contents.|

{{item1{CGI 程式撰寫}}}

''1. 撰寫 uptime.cgi 程式''
{{{
#!/bin/bash

PATH="/bin:/usr/bin:/usr/ucb:/usr/opt/bin"
export PATH
echo "Content-type: text/html"
echo ""

echo '<html>'
echo '<head>'
echo '<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">'
echo '<link rel=stylesheet type="text/css" href="/T2/style.css">'

echo '<title>System Uptime</title>'
echo '</head>'
echo '<body>'

echo '<h3>'
hostname
echo '</h3>'

uptime

echo '</body>'
echo '</html>'

exit 0
}}}

''2. 存到 /usr/lib/cgi-bin 目錄''

{{item1{執行畫面}}}

''1. 輸入 http://140.137.214.250/T2''

[img[img/android/androidcgi01.png]]

''2. 點選上圖中 System Uptime 超連接''

[img[img/android/androidcgi02.png]]

{{item1{網頁樣式設計}}}

''1. 設定背景顏色''

{{{
$ nano /var/www/T2/style.css
body {
background :red;                            /* 背景底色 */
}
}}}

''2. 加入背景圖''
{{{
body {
background :red;                            /* 背景底色 */
background-image : url('abc.gif');      /* 背景圖片網址 */
background-repeat : repeat;            /* 背景圖片重複排列 */

 /* 背景圖片固定 : Sets whether a background image is fixed or scrolls with the rest of the page*/
background-attachment: fixed;       

background-position:top right;          /* 將笑臉圖顯示在右上角 */
}

}}}

<<toBalaNotes "1">>



///%1
//%/
''參考文章''
1. 新一代的企業個人電腦 - 虛擬化桌面
http://www.sysage.com.tw/guest/GoGoBuyOne.aspx?id=21

『史上最大電腦換機潮,未來 2 年達 2.7 億台』這是去年 10 月第 1142 期商業周刊的封面主題。封面文章敘述著千禧年之後,全球的 PC 市場因為持續經歷了網路泡沫、 SARS  危機、以及史上最大金融海嘯的問題,所以一直沒有將真正的潛力爆發出來。而最強的換機潮終於要在 2010~2011 年正式登場,金融海嘯後的經濟復甦,以及微軟   Windows  7  的推出正是這波PC  大幅汰舊換新的主要原因。

根據  IDC  的預估, Windows 7  在 2010 年將佔有企業購買微軟作業系統的 49.5% ,相當於高達 5 千 8 百萬份的出貨量。而許多既有的個人電腦 (PC) 硬體勢必也將因為規格老舊,無法配合諸如螢幕觸控、多媒體應用等   Windows  7  的嶄新功能而需要被汰舊換新,因此造就一波換機潮。商業周刊提到這波換機潮的主要新興個人終端電腦設備產品包括小筆電、以及嘗試取代個人電腦的  AIO( 一體成型電腦 ) 等等。

''Apple. 推出 iPad 平板電腦,搶佔個人終端設備市場''

不過,資訊產業的特色就是快速多變,預估與規劃總是趕不上實際的變化。今年 4 月份起才逐步在全球各地銷售的蘋果 (Apple) 公司全新終端設備— iPad  平板電腦,在美國市場一推出便造成熱銷,而將來勢必也會在個人終端設備市場中佔領重要的一席地位,並可能改變商業周刊去年 10 月封面故事對個人電腦的銷售預估數字。

iPad  可說是   iPhone  的放大版,重量僅 0.7 公斤而擁有 9.7 吋的觸控螢幕,並傳承了  Apple  產品一貫的強大功能及酷炫外型;其定位介於電腦與智慧型手機之間,強調更適合處理上網、收發郵件、觀賞多媒體資料、及閱讀電子書等等工作。 2001 年以來累計達 2 億 5 千萬台   iPod ,以及光是 2009 年就有 2 千 7 百萬台iPhone  的銷售亮眼成績便是大家預期   iPad  也會成功的最有力依據,功能多樣又先進的  iPad  想必讓許多人都會想擁有它。

但是外出時到底要帶筆記型電腦出門,還是帶  iPad出門呢?帶筆記型電腦出門怕不夠炫,帶  iPad 出門又怕因為不是微軟  Windows 作業平台而無法處理公事,真是傷腦筋!難道要當雙槍俠,隨時帶著兩台設備出門嗎?那也太累了吧!要是有個兩全齊美的方法,能將  Windows  7 放在 iPad 上執行,讓比爾蓋茲 (Bill Gates) 和史帝夫賈伯斯 (Steve Jobs) 兩大資訊業界奇才的最新創作整合起來同時為你服務,那就太帥了。

''桌面虛擬化實現夢想''

感謝雲端運算及虛擬化技術的快速發展,兩全齊美的方法其實已經存在了。虛擬化技術的發展非常快速,在完成伺服器及應用程式的虛擬化之後,已經將觸角延伸到了個人電腦的桌面,讓用戶端的作業系統及應用軟體能夠與硬體設備脫離,不需要再永遠被綁在一起了。所以,具備員工個人電腦桌面虛擬化(Desktop Virtualization)  能力的企業資訊中心,便能將員工上班所使用的電腦作業平台、應用程式、及個人化設定包裹起來在機房執行,再透過網路將結果傳送到員工手上諸如  iPad 、智慧型手機等設備。

這樣,員工隨時隨地可以感覺好像身在辦公室一樣的處理公事,電腦畫面及使用習慣都不需有所更動,而公司則可獲得員工更多的產值,真是兩全齊美啊。這種新一代的桌面虛擬化架構讓專家也都很看好其未來的發展,例如   IDC 便預估 2010 年雲端服務將漸趨成熟,桌面虛擬化的普及度更將提升至 34% 。而   Gartner  在 2009 年 12 月發表的“ 2010 年用戶終端  PC  預估 (Predicts 2010: PC End-User  Issues) ”文章中也預期在 2012 年的時候,60% 的全球企業  PC 將使用桌面虛擬化的技術。

''桌面虛擬化:員工、企業、資訊人員 3 贏的架構''
    
目前,伺服器及應用程式虛擬化架構是將機房中企業及員工所需要用到的伺服器作業系統及如   ERP 、C R M  等軟體程式自伺服器硬體中脫離出來,並且包裹起來成為所謂的 “Bubble  ( 泡泡 )” 形式在共用的硬體平台上執行,如果硬體有問題或需升級時,“Bubble” 的軟體包便很容易地隨時搬移到另一個硬體上執行,省時、省事又安全。

而企業個人電腦桌面虛擬化的運作便是延續伺服器及應用程式虛擬化的架構而來的。但這次 “Bubble” 中要包裹的則是員工   PC  的作業系統、個人桌面設定、以及如郵件收發、文書處理等應用程式。企業機房中需要添購專門執行桌面虛擬化的硬體伺服器,在收到員工的連線需求之後,便以 “On-demand” 的方式立即將該員工所需的 “Bubble” 軟體包裹、執行,然後快速地將結果畫面透過區域或廣域網路傳送到員工的終端設備上,來達到桌面虛擬化的功能。

<<toBalaNotes "vdesktop">>


///%vdesktop
//%/
''參考文章''
1. How you can use qemu/kvm base images to be more productive (Part 1)
http://www.linux-kvm.com/content/how-you-can-use-qemukvm-base-images-be-more-productive-part-1
2. How-to grow any Qemu system image
http://kevin.deldycke.com/2007/04/how-to-grow-any-qemu-system-image/
3. How to access virtual machine image files from the host 
http://blog.vmsplice.net/2011/02/how-to-access-virtual-machine-image.html
4. Mounting KVM qcow2 qemu disk images
http://blog.loftninjas.org/2008/10/27/mounting-kvm-qcow2-qemu-disk-images/
5. 基於 KVM 與 libvirt 的虛擬化叢集系統-儲存空間的配置@@color:red; (重要)@@
http://www.openfoundry.org/tw/tech-column/8539-the-clustered-virtualization-system-based-on-kvm-and-libvirt-chapter-of-debian-storage-allocation

{{item1{Tiny Server Core 虛擬電腦的硬碟使用}}}
Tiny Server Core 是以 Live CD 方式執行, 內定是不具儲存資料功能, 如要具有儲存資料功能, 請先替它產生一顆硬碟, 然後分割, 格式化此顆硬碟, 便可使得 Tiny Server Core 具有儲存能力.

''1. 產生虛擬硬碟檔''
{{{
$ kvm-img create -f qcow2 tsc32.qcow2 50m
Formatting 'tsc32.qcow2', fmt=qcow2 size=52428800 encryption=off cluster_size=65536 
}}}

qcow2 是 "隨需擴增" 的硬碟格式, 這種格式在 Windows XP/2003/Vista/7 是無法識別, 並且無法在 Linux 系統被 loop 裝置掛載, 因它無法使用 offset 參數指定 "硬碟分割表".

''2. 啟動 Tiny Server Core  虛擬電腦''
{{{
$ kvm -m 128 -cdrom tsc32.iso -hda tsc32.qcow2 -boot d  -nographic -curses 
}}}

''3. 分割及格式化虛擬硬碟檔''

''硬碟磁區分割''
{{{
 tc@box:~$ sudo fdisk /dev/sda                                                   
 Device contains neither a valid DOS partition table, nor Sun, SGI, OSF or GPT di
 sklabel                                                                         
 Building a new DOS disklabel. Changes will remain in memory only,               
 until you decide to write them. After that the previous content                 
 won't be recoverable.                                                           
                                                                                 
                                                                                 
 Command (m for help): n                                                         
 Command action                                                                  
    e   extended                                                                 
    p   primary partition (1-4)                                                  
 p                                                                               
 Partition number (1-4): 1                                                       
 First cylinder (1-6, default 1): Using default value 1                          
 Last cylinder or +size or +sizeM or +sizeK (1-6, default 6): Using default value
  6                                                                              
                                                                                 
 Command (m for help): w                                                         
 The partition table has been altered.                                           
 Calling ioctl() to re-read partition table   
}}}

''硬碟格式化''
{{{
 tc@box:~$ sudo mkfs.ext4 /dev/sda1                                              
 mke2fs 1.41.14 (22-Dec-2010)                                                    
 Filesystem label=                                                               
 OS type: Linux                                                                  
 Block size=1024 (log=0)                                                         
 Fragment size=1024 (log=0)                                                      
 Stride=0 blocks, Stripe width=0 blocks                                          
 12048 inodes, 48160 blocks                                                      
 2408 blocks (5.00%) reserved for the super user                                 
 First data block=1                                                              
 Maximum filesystem blocks=49545216                                              
 6 block groups                                                                  
 8192 blocks per group, 8192 fragments per group                                 
 2008 inodes per group                                                           
 Superblock backups stored on blocks:                                            
         8193, 24577, 40961                                                      
                                                                                 
 Writing inode tables: done                                                      
 Creating journal (4096 blocks): done                                            
 Writing superblocks and filesystem accounting information: done                 
                                                                                 
 This filesystem will be automatically checked every 31 mounts or                
 180 days, whichever comes first.  Use tune2fs -c or -i to override. 
}}}

@@color:red;''[重要]'' 虛擬硬碟檔分割及格式化完成, 記得一定要重新開機@@

''4. 安裝應用套件''
在 TSC 系統下, 使用 tce-load 這命令執行應用套件安裝.
{{{
$ tce-load -wi coreutils.tcz 
}}}
''-wi'' 此參數功能為下載的套件, 系統啟動過程會自動掛載進來

安裝 util-linux.tcz 套件,  這套件含有 setterm 命令, setterm 這命令用來取消螢幕保護裝置
{{{
$ tce-load -wi util-linux.tcz
Downloading: util-linux.tcz
Connecting to distro.ibiblio.org (152.19.134.43:80)
util-linux.tcz    100% |*******************************|   532k 00:00:00 ETA
util-linux.tcz: OK
}}}

安裝 tree 命令套件,此命令用來顯示目錄結構
{{{
$ tce-load -wi tree.tcz
}}}

安裝 bash 命令套件, 此命令提供完整 Shell 功能
{{{
$ tce-load -wi bash.tcz
}}}

安裝 nano 命令套件, 此命令提供一個簡易友善的文字編輯器
{{{
$ tce-load -wi nano.tcz
}}}

''檢視套件存放目錄''
{{{
$ tree /mnt/sda1
     /mnt/sda1/                                                                     
     |-- lost+found [error opening dir]                                             
     |-- mydata.tgz                                                                 
     `-- tce                                                                        
         |-- onboot.lst                                                             
         |-- ondemand                                                               
         `-- optional                                                               
             |-- acl.tcz                                                            
             |-- acl.tcz.dep                                                        
             |-- acl.tcz.md5.txt                                                    
             |-- attr.tcz                                                           
             |-- attr.tcz.dep                                                       
             |-- attr.tcz.md5.txt                                                   
             |-- bash.tcz                                                           
             |-- bash.tcz.dep                                                       
             |-- bash.tcz.md5.txt                                                   
             |-- coreutils.tcz                                                      
             |-- coreutils.tcz.dep                                                  
             |-- coreutils.tcz.md5.txt
}}}

''5. 修改系統設定 - 固定 IP 設定''
Tiny Core Linux 在還沒登入之前, 會以 root 身份, 執行 /opt/bootlocal.sh, 所以網路相關設定可在此程式中設定. 如需了解更多網路相關設定, 請參考 [[Tiny Core 網路設定 - DHCP, 固定 IP 及 NAT|http://linuxkvm.blogspot.tw/2011/07/tiny-core_24.html]]

在 /opt/bootlocal.sh 程式中, 加入以下命令 :
{{{
$ sudo nano /opt/bootlocal.sh
                   :
# 取消 DHCP Client 功能, 這樣才不會覆蓋你所設定的固定 IP
# Stop the DHCP client so it cannot overwrite the static IP set below.
# NOTE(review): the original "ps aux | grep udhcpc" also matched the grep
# process itself, and "cut -d' ' -f1" on the collapsed output yielded only
# the first token — so the wrong PID (or none) could be killed.  BusyBox
# killall targets the daemon by name and is immune to both problems.
killall udhcpc 2>/dev/null
sleep 6

# Assign the static address to eth0
ifconfig eth0 10.0.2.25 netmask 255.255.255.0 up
# Default gateway
route add default gw 10.0.2.3
# DNS resolver
echo "nameserver 168.95.1.1" > /etc/resolv.conf
}}}
程式修改後, 儲存設定, 命令如下 :
{{{
$ filetool.sh -b
}}}
重新開機, 然後測試網路
{{{
$ sudo reboot
}}}
<<toBalaNotes "1">>
{{item1{Tiny Core Linux 硬碟安裝 - Frugal}}}

''1. 下載 CorePlus 安裝光碟''

請到 http://distro.ibiblio.org/tinycorelinux/downloads.html, 點選 "CorePlus" 超連接, 下載 CorePlus-current.iso 至 ISO 目錄

''2. 產生虛擬硬碟''
{{{
$ kvm-img create -f qcow2 tsc32.qcow 80m
Formatting 'tsc32.qcow', fmt=qcow2 size=83886080 encryption=off cluster_size=65536 
}}}

''3. 啟動 Tiny Core Linux''
{{{
$ kvm -m 256 -cdrom CorePlus-current.iso -hda tsc32.qcow -boot d -usbdevice tablet
}}}

''[註]'' "-usbdevice tablet" 這個參數, 只在 Nested VM 架構才需使用

''4. 啟動安裝程式''

[img[img/tinycore/tcl03.png]]

''5. 勾選 "Frugal" 項目, 然後勾選 "Whole Disk", 最後選擇 sda 這個硬碟 (不需要先分割此硬碟)''

[img[img/tinycore/tcl04.png]]

''6. 選擇 ext4 檔案系統''

[img[img/tinycore/tcl05.png]]

''7. 輸入核心參數''

[img[img/tinycore/tcl06.png]]

''8. 選擇 "Core Only" 系統運作模式''

[img[img/tinycore/tcl07.png]]

''9. 關機並重新開機''
{{{
$ kvm -m 256 -hda tsc32.qcow -boot c 
}}}
<<toBalaNotes "2">>
{{item1{QEMU 差異硬碟檔}}}
qcow2 新增 "差異硬碟(Copy On Write)" 功能, 你可產生一個 Base image, 給許多虛擬電腦同時共用, 各個虛擬電腦差異的部分, 存在各自的 ''差異硬碟檔'' 中, 這對於虛擬電腦的硬碟檔總量大小來說, 可節省不少空間. 在實作環境 ''差異硬碟'' 比較適合用在 "多台小量差異的 GuestOS" 上,如果同時有多部差異較大的系統運作, 要去比對差異, 這勢必會造成系統效能降低, 應用在正式的虛擬伺服器上,建議還是使用專用虛擬硬碟.

''建立隨需擴增虛擬硬碟''
{{{
$ kvm-img create -f qcow2 vmdisk/vyatta64.qcow2 1G
Formatting 'vmdisk/vyatta64.qcow2', fmt=qcow2 size=1073741824 encryption=off cluster_size=65536 
}}}

''安裝 Vyatta 系統至虛擬硬碟''
{{{
$ kvm -m 512 -hda vmdisk/vyatta64.qcow2 -cdrom ISO/vyatta-livecd-virt_VC6.4-2012.05.31_amd64.iso -boot d
$ kvm -m 512 -hda vmdisk/vyatta64.qcow2 -boot c
}}}

''建立差異硬碟檔''
{{{
$ kvm-img create -b ~/vmdisk/vyatta64.qcow2 -f qcow2 vt01.qcow2
Formatting 'vt01.qcow2', fmt=qcow2 size=1073741824 backing_file='vmdisk/vyatta64.qcow2' encryption=off cluster_size=65536 
}}}

''[重點]'' -b 參數所指定的主要虛擬硬碟檔 (base images), ''必須給絕對路徑''. 主要虛擬硬碟檔的格式可以是 raw.

''檢視差異硬碟檔資訊''
{{{
$ kvm-img info vt01.qcow2 
image: vt01.qcow2
file format: qcow2
virtual size: 1.0G (1073741824 bytes)
disk size: 136K
cluster_size: 65536
backing file: vmdisk/vyatta64.qcow2 (actual path: vmdisk/vyatta64.qcow2)
}}}

''使用差異硬碟檔''
{{{
$ kvm -m 512 -hda vt01.qcow2 -boot c
}}}

{{item1{合併 Base 硬碟檔與差異硬碟檔}}}
{{{
# DN01.qcow2 的 Base 檔是 hdp10432.qcow
$ file vmdisk/DN01.qcow2 
vmdisk/DN01.qcow2: QEMU QCOW Image (v2), has backing file (path hdp10432.qcow), 2147483648 bytes

# 備份 Base 檔
$ cp vmdisk/hdp10432.qcow hdp10432.qcow

# 將差異硬碟檔內容寫回 Base 檔
$ kvm-img commit -f qcow2 vmdisk/DN01.qcow2 
Image committed.

$ mv vmdisk/hdp10432.qcow vmdisk/DN01.qcow2 
$ cp hdp10432.qcow vmdisk/
}}}

<<toBalaNotes "2">>
{{item1{增加硬碟大小}}}

''1. 產生一個 4G 隨需擴增硬碟檔''
{{{
$ kvm-img create -f qcow2 TC383.img 4G
Formatting 'TC383.img', fmt=qcow2 size=4294967296 encryption=off cluster_size=0 
}}}

''2.將可擴充硬碟格式 (qcow), 轉換成不可擴充硬碟格式 (raw)''
{{{
$ kvm-img convert TC383.img -O raw system.raw
}}}

''3. 檢視要擴充的硬碟檔資訊''
{{{
$ kvm-img info system.raw 
image: system.raw
file format: raw
virtual size: 4.0G (4294967296 bytes)
disk size: 0

# 以下命令, 會顯示實際檔案的大小
$ ls -al system.raw    
-rw-r--r-- 1 student student 4294967296 2011-08-26 15:18 system.raw
}}}

''4. 產生實際大小為 1G 硬碟檔''
{{{
$ dd if=/dev/zero of=zeros.raw bs=1024k count=1024
1024+0 records in
1024+0 records out
1073741824 bytes (1.1 GB) copied, 41.8319 s, 25.7 MB/s
}}}

''5. 將 system.raw 擴充為 5G''
{{{
$ cat system.raw zeros.raw > big5G.raw
$ ll big5G.raw 
-rw-r--r-- 1 student student 5368709120 2011-08-26 15:29 big5G.raw
}}}

最後擴充的硬碟檔, 其原先系統的分割區, 並沒有改變, 所以這時你可以使用 [[GParted Live CD|http://gparted.sourceforge.net/livecd.php]] 來重新規畫, 執行命令如下 :
{{{
$ kvm -hda big5G.raw -cdrom gparted-livecd-0.3.4-5.iso -boot d
}}}

''6. 將不可擴充硬碟格式(raw), 轉換成可擴充硬碟格式 (qcow)''
{{{
$ kvm-img convert big5G.raw -O qcow growed-system.qcow

$ ll growed-system.qcow 
-rw-r--r-- 1 student student 20528 2011-08-26 15:33 growed-system.qcow

$ kvm-img info growed-system.qcow 
image: growed-system.qcow
file format: qcow
virtual size: 5.0G (5368709120 bytes)
disk size: 24K
cluster_size: 4096
}}}

<<toBalaNotes "2">>

///%1
//%/

///%2
//%/
本文網址 : http://www.ithome.com.tw/itadm/article.php?c=50872

利用 ''虛擬磁碟檔案'' 或 ''原生裝置(Raw Device)''存取,是虛擬環境兩種基本存取類型,前者具有更彈性的管理功能,後者則具較佳效能,且便於利用儲存設備的進階磁碟管理功能。

虛擬機器的磁碟存取一般可分兩種方式,一為讓虛擬機器存取虛擬平臺模擬出來的虛擬磁碟,虛擬磁碟的實體是虛擬磁碟檔案(如 .VMDK 或 .VHD)。另一為讓虛擬機器直接存取實體磁碟,也就是將儲存設備的磁碟區直接掛載給虛擬機器使用,不經由檔案系統的中介,即所謂的原生裝置(Raw Device)模式。

{{item1{存取虛擬磁碟檔依虛擬化類型而異}}}
透過檔案系統儲存虛擬機器的虛擬磁碟檔案,是最普遍的一種虛擬環境儲存模式,依虛擬平臺屬於寄居式(Hosted,需安裝在已備妥作業系統的主機上)或裸機(Bare Metal,可直接安裝在無作業系統的主機)架構,有兩種作法:

''1. 寄居式虛擬環境的磁碟存取架構 (使用寄居作業系統的檔案系統)''
寄居式架構的虛擬平臺,必須建立在底層主機既有的作業系統上,因此虛擬機器的虛擬磁碟,也就是直接存放在底層作業系統透過檔案系統提供的儲存空間上。

這種存取設定最為簡單,也最容易理解。先準備好裝有 Windows 或 Linux 等作業系統的實體機器,然後安裝 VMware Workstation、Virtual Server 等寄居式架構的虛擬平臺。接下來建立虛擬機器的虛擬磁碟時,將對應的虛擬磁碟檔案(.VMDK或.VHD等),存放在底層 Windows 或 Linux 的NTFS、Ext3/4 檔案系統提供的空間即可。

這種作法的問題是,虛擬機器對實體硬碟的存取動作,都須經過兩層檔案系統的控制,包括虛擬機器內 Guest 作業系統的檔案系統,以及底層實體電腦作業系統的檔案系統,效能相對較差。

''2. 裸機式虛擬環境的存取架構 (使用專屬的檔案系統)''
裸機式架構的虛擬平臺無須透過既有作業系統的中介,Hypervisor 可直接安裝於實體電腦上。不過由於這種架構沒有底層作業系統,因此虛擬平臺便得自己扮演作業系統的角色,以便為上層的應用提供包括硬體資源配置、協調與檔案存取管理在內的基本服務。

Hypervisor 可視為一種 microkernel 類型的作業系統,負責執行緒管理(Thread management)、記憶體管理與內進程通訊(Inter-process communication,IPC)等核心作業,而完整作業系統所需的其他功能如驅動程式(Device Driver)或檔案管理等,則由另外搭配的 User-mode Servers 提供相關服務,IPC 就是用於溝通這些服務。

以 Xen 為基礎的虛擬化平臺如 Citrix Xen、Sun xVM Server 與 Virtual Iron、Microsoft Hyper-V 等來說,是透過 Domain 0(即主機 HOST)中經過特別修改的作業系統,來為整個環境的虛擬機器提供檔案存取服務,只有 Domain 0 會存取實體磁碟,其他虛擬機器(Domain U)都是透過 Hypervisor 聯繫 Domain 0,存取模擬出來的虛擬磁碟。

因此視 Domain 0 中的作業系統不同,不同虛擬平臺提供的檔案系統類型也就不同。如 ''Sun xVM Server'' 平臺是以 Solaris 10 做為 Domain 0,故其提供的檔案系統就是 ZFS。而採用 SUSE Linux 核心的 ''Virtual Iron'',其檔案系統便是一般 Linux 常見的 Ext3、''Hyper-V 則是 Windows 標準的 NTFS'' 等。

較特別的是 VMware 雖可使用 ext3 這種 Linux 標準的檔案系統,但只用於存放開機、log 等系統檔案,至於用於存放虛擬磁碟檔案的磁碟區,則採用 VMware 專屬的檔案系統上,也就是 ''VMware Infrastructure 3'' 架構中的 ''VMFS(Virtual Machine File System)''。

VMFS 是 VMware ESX 為 虛擬環境應用而設計的專屬檔案系統(VMFS 格式的磁碟區不能為其他作業系統辨識),可為不同虛擬機器分別調節 Volume、區塊(Block)與檔案的大小,以為虛擬機器提供最適當的 I/O 效能,還能讓最多 32 臺 ESX 伺服器同時存取一個 VMFS Volume,效能應該會比一般作業系統的檔案系統更好。

''VMware VMFS Wikipedia : '' http://en.wikipedia.org/wiki/VMware_VMFS

{{item1{無須中介的原生裝置存取模式}}}
所謂原生裝置(Raw Device)存取模式,是指讓虛擬機器直接存取實體磁碟機,而不是存取虛擬平臺模擬出來的虛擬磁碟機(實際上是對應於一組特定檔案),因此存取動作不會經過作業系統或虛擬平臺的檔案系統這一層,可讓資料直接從實體磁碟到虛擬機器進行傳輸。

在這種模式下,可把後端儲存設備上指定的 LUN 直接掛載給前端虛擬機器使用,因此更能確保虛擬機器所享有的 I/O 資源,只要在儲存設備上設好特定 LUN 占用的傳輸通道即可。

相對於前述虛擬磁碟檔案模式,由於多個虛擬機器的虛擬磁碟檔案都存放在同一個 Volume 上,也就等於所有虛擬機器必須共享這個 Volume 的存取通道,只能由虛擬平臺來協調 I/O 資源的優先使用順序。所以對於存取非常頻繁的應用程式來說,Raw Device 理論上可大幅提高存取效能。

理論上,透過 Raw Device 模式,即可讓虛擬機器直接存取實體磁碟中的資料,無須透過虛擬磁碟檔案的中介,但不同虛擬平臺的 Raw Device 模式又有所差異,如 VMware ESX 的 Raw Device 模式,仍須在 VMFS 中產生一組對應於 Raw Device 的映射位址與基本設定的檔案(Map file),虛擬機器必須參照這組檔案去存取對應的實體磁碟區,VMware 稱這種模式為 Raw Device Mapping。

而另一些虛擬平臺如 Virtual Iron 或 Citrix XenServer 的 Raw Device 模式,則不需要依靠這類檔案的中介,只要透過虛擬管理伺服器為虛擬機器指派存取的磁碟區即可。 

[img[img/virtualdisk01.jpg]]

[img[img/virtualdisk02.jpg]]

[img[img/virtualdisk03.jpg]]

''兩種儲存模式的優劣''
顯然的,直接存取實體磁碟的 Raw Device 模式,理應具有較佳的效能,但這只是純就理論上而言,實際上的存取效能,仍須視資料大小與應用程式存取行為特性而定。

從另一方面來說,效能也不是決定採用何種儲存架構的唯一考量因素,透過虛擬磁碟存取仍有許多 Raw Device 所不具備的優點。例如虛擬磁碟檔案的配置更為彈性,一個磁碟區即可同時存放多個虛擬磁碟檔案,而 Raw Device 則必須將磁碟區切割為多個 LUN,分別掛載到指定的虛擬機器上,容量利用效率較差,管理者必須事先在儲存設備上,做好儲存區域的劃分,並需搭配SAN 架構。

另外若採用虛擬磁碟檔案,亦便於使用虛擬平臺內建的多種管理功能,如對虛擬磁碟執行快照(Snapshot)與複製(Clone)等。

不過對於已擁有 SAN 儲存設備的企業用戶來說,若要在虛擬環境中充分利用 SAN 儲存設備內建的快照、複製等進階磁碟應用功能,則 Raw Device 模式就是較好的選擇,而且透過儲存設備內建功能來執行快照,對前端系統不會造成負擔。相對的由虛擬平臺執行的快照作業,就會嚴重影響頂層 Guest 作業系統的效能。

而從系統部署或遷移的觀點來看,當用戶打算將既有的實體機器轉換到虛擬環境時,若在儲存方面採用虛擬磁碟模式,則需透過轉換工具將原來的實體磁碟機資料轉換為虛擬磁碟格式,而這種 P2V 作業十分費時。若用戶擁有 SAN 環境,且原來的實體伺服器就是存取後端 SAN 儲存設備提供的磁碟空間,則此時可採用 Raw Device 模式,直接將 SAN 磁碟機指派給虛擬機器存取即可,可省下花費在 P2V 作業上的時間。文⊙張明德 
<<toBalaNotes "1">>
''Logical Unit Number (LUN)''
In computer storage, a logical unit number or LUN is a number used to identify a logical unit, which is a device addressed by the SCSI protocol or similar protocols such as Fibre Channel or iSCSI. A LUN may be used with any device which supports read/write operations, such as a tape drive, but is most often used to refer to a logical disk as created on a SAN. Though not technically correct, the term "LUN" is often also used to refer to the drive itself.
///%1
//%/
''參考文章''
1. Windows VirtIO Drivers
http://www.linux-kvm.org/page/WindowsGuestDrivers/Download_Drivers
2. Latest Release of Windows Virtio Network Drivers
http://www.linux-kvm.com/content/latest-release-windows-virtio-network-drivers
3. Redhat 5.4 Windows Virtio Drivers Part 2: Block Drivers
http://www.linux-kvm.com/content/redhat-54-windows-virtio-drivers-part-2-block-drivers

{{item1{建立新的虛擬主機}}}
使用 ''虛擬機器管理員'' 工具, 新建的虛擬機器,其定義檔儲存在 ''/etc/libvirt/qemu'' 目錄中 (虛擬機器名稱.xml), 硬碟映像檔 (*.img) 則儲存在 ''/var/lib/libvirt/images'' 目錄中

[img[img/kvm/CreateVM01.png]]

[img[img/kvm/CreateVM02.png]]

[img[img/kvm/CreateVM03.png]]

[img[img/kvm/CreateVM04.png]]

[img[img/kvm/CreateVM05.png]]

[img[img/kvm/CreateVM06.png]]

<<toBalaNotes "1">>
{{item1{直接使用硬碟映像檔, 建立虛擬主機}}}

[img[img/kvm/img2createvm01.png]]

[img[img/kvm/img2createvm02.png]]

[img[img/kvm/img2createvm03.png]]

[img[img/kvm/img2createvm04.png]]

[img[img/kvm/img2createvm05.png]]

[img[img/kvm/img2createvm06.png]]

<<toBalaNotes "2">>
{{item1{檢視虛擬主機設定}}}

[img[img/kvm/CreateVM07.png]]

''ACPI (Advanced Configuration and Power Interface)''
''APIC: Advanced Programmable Interrupt Controller (Intel, PIC) ''

[img[img/kvm/CreateVM08.png]]

[img[img/kvm/CreateVM09.png]]

[img[img/kvm/CreateVM10.png]]

<<toBalaNotes "3">>


///%1
//%/

///%2
//%/

///%3
//%/
{{item1{在 iLab 目錄中, 產生 Lab168 虛擬教學模組目錄}}}
{{{
$ cd ~/iLab
$ mkdir Lab168
$ cd Lab168
}}}
{{item1{產生 Vyatta 虛擬電腦}}}

''1. 產生虛擬硬碟''
{{{
$ kvm-img create -f qcow2 vyatta254.qcow2 1G
Formatting 'vyatta254.qcow2', fmt=qcow2 size=1073741824 encryption=off cluster_size=65536
}}}

''2. 開始安裝''
{{{
$ virt-install -r 512 -n vyatta254 --disk path=./vyatta254.qcow2,format=qcow2 --noreboot --cdrom ~/vyatta-livecd_VC6.5R1_amd64.iso 
}}}

''[註]'' vyatta-livecd_VC6.5R1_amd64.iso 可由 http://www.vyatta.com/downloads/vc6.5/vyatta-livecd_VC6.5R1_amd64.iso 這網址取得, 請下載至家目錄

''3. 登入畫面, 輸入 vyatta/vyatta''

''4. 輸入 install system 命令, 開始將 Vyatta 系統安裝至硬碟''

$ install system
The wizard is opened and you are going to through installation process:
Would you like to continue? (Yes/No) [YES]: ''Enter''
Partition (Auto/Union/Parted/Skip) [Auto]: ''Enter''
Install the image on? [sda]: ''Enter''
This will destroy all data on /dev/sda.
Continue? (Yes/No) [No]: ''Yes''
How big of root partition should I create? (1000MB – 1074MB) [1074]MB: ''Enter''
I found the following configuration files
/opt/vyatta/etc/config/config.boot

Which one should I copy to sda? [/opt/vyatta/etc/config/config.boot] ''Enter''
Enter password for administrator account
Enter vyatta password: ''student''
Retype vyatta password: ''student''
Which drive should GRUB modify the boot partition on? [sda]: ''Enter''
Done!

''5. 顯示系統版本資訊''
{{{
vyatta@vyatta:~$ show version
Version:      VC6.1-2010.08.20
Description:  Vyatta Core 6.1 2010.08.20
Copyright:    2006-2010 Vyatta, Inc.
Built by:     autobuild@vyatta.com
Built on:     Fri Aug 20 04:27:08 UTC 2010
Build ID:     1008200429-170b446
Boot via:     image
Uptime:       15:02:36 up 5 min,  1 user,  load average: 0.00, 0.01, 0.00
}}}

''6. 關機''
{{{
$ sudo shutdown -h now
}}}

{{item1{在 Lab168 目錄產生 vyatta254.xml 設定檔}}}
{{{
$ virsh dumpxml vyatta254 > vyatta254.xml
}}}

{{item1{移除 vyatta254 虛擬電腦}}}
{{{
$ virsh undefine vyatta254
區域 vyatta254 已經取消定義
}}}

{{item1{修改 vyatta254 虛擬電腦設定檔}}}
{{{
$ nano vyatta254.xml

<domain type='kvm'>
  <name>vyatta254</name>                                                    # 設虛擬主機名稱, 要確定具有唯一性
  <uuid>88c793a6-7d32-28e0-1bbf-7f42be1d475d</uuid>            # 刪除這行, 建立虛擬主機時, 會重新產生
  <memory>524288</memory>
  <currentMemory>524288</currentMemory>
  <vcpu>1</vcpu>
  <os>
    <type arch='x86_64' machine='pc-1.0'>hvm</type>
    <boot dev='hd'/>
  </os>
  <features>
    <acpi/>
    <apic/>
    <pae/>
  </features>
  <clock offset='utc'/>
  <on_poweroff>destroy</on_poweroff>
  <on_reboot>restart</on_reboot>
  <on_crash>restart</on_crash>
  <devices>
    <emulator>/usr/bin/kvm</emulator>
    <disk type='file' device='disk'>
      <driver name='qemu' type='qcow2'/>
      <source file='-vmdk-vyatta254.qcow2'/>                             #  利用 "-vmdk-" 置換字串, 給予虛擬硬碟檔正確完整目錄
      <target dev='hda' bus='ide'/>
      <address type='drive' controller='0' bus='0' unit='0'/>
    </disk>
    <disk type='block' device='cdrom'>
      <driver name='qemu' type='raw'/>
      <target dev='hdc' bus='ide'/>
      <readonly/>
      <address type='drive' controller='0' bus='1' unit='0'/>
    </disk>
    <controller type='ide' index='0'>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x1'/>    
    </controller>
    <interface type='user'>                                                                 # 將 type='user' 改成 type='network'
      <mac address='52:54:00:b4:05:c6'/>                                           # 修改 MAC 位址, 要確定具有唯一性
      <address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>    # 請移除此行
      <source network='default'/>                                                         # 加入這一行, 虛擬網路名稱要一致 (注意大小寫)
    </interface>
    <serial type='pty'>
      <target port='0'/>
    </serial>
    <console type='pty'>
      <target type='serial' port='0'/>
    </console>
    <input type='mouse' bus='ps2'/>
    <graphics type='vnc' port='-1' autoport='yes'/>
    <video>
      <model type='cirrus' vram='9216' heads='1'/>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>
    </video>
    <memballoon model='virtio'>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
    </memballoon>
  </devices>
</domain>
}}}

<<toBalaNotes "1">>
{{item1{編輯 Lab168 虛擬教學模組設定檔 - Lab.xml}}}
{{{
$ nano Lab.xml 
<KVMLab>
  <name>Lab168</name>
  <description>Networking 168 : Vyatta Network</description>

  <!-- 
     Lab 運作資訊
  -->
  <task>
    <job>VM Description</job>

    <!-- Lab 網路架構圖 -->
    <map>Lab168.png</map>
  </task>

  <!-- 
      指定虛擬硬碟檔的存放目錄, "-user-" 代表登入的帳號名稱
  -->
  <store>/home/-user-/myLab/</store>

  <!-- 
      Lab 啟動設定 
  -->
  <labup>
    <!-- 
      Lab 啟動前需先關閉以下虛擬主機 

        S : 代表使用 "virsh shutdown" 關機
        D : 代表使用 "virsh destroy" 關機
    -->
    <shutdown>
    </shutdown> 
  </labup>

  <!--
    Lab 所需網路設備
      X:SH
      |
      H : 代表 Switch HUB 
  -->
  <network>
  </network>

  <!--
    Lab 所需虛擬主機

      xx:vm:IP(MAC),IP(MAC):Port
      ||
      ||*** S :代表 shutdown, D :代表 destroy
      | 
      |**** U :代表啟動 VM, D :代表不啟動 VM
  -->
  <vm>
    US:vyatta254:192.168.122.xx(ff),192.168.100.254(5):
  </vm>

</KVMLab>
}}}

{{item1{產生 Lab168 虛擬教學模組}}}
{{{
$ cd ~/iLab
$ sudo ./labcmd.sh create -f Lab168

=> 開始建立 Lab168/ 虛擬網路 (Virtual Network)

=> 開始建立 Lab168/ 虛擬電腦 (Virtual Machine)

複製 vyatta254.qcow2 檔案 ... 成功
建立 vyatta254 虛擬電腦完成
}}}

{{item1{啟動 Lab168 虛擬教學模組}}}
{{{
$ sudo ./labcmd.sh start Lab168

啟動 Lab168 所有虛擬電腦
-----------------------------
vyatta254 啟動中 .... 成功
}}}

{{item1{登入 vyatta254 虛擬電腦}}}
{{{
$ sudo virsh console vyatta254
Connected to domain vyatta254
Escape character is ^]

Welcome to Vyatta - vyatta ttyS0

vyatta login: vyatta
Password: 
}}}

按 "ctrl + ]" 脫離 vyatta254 的 Console 模式

{{item1{停止 Lab168 虛擬教學模組}}}
{{{
$ sudo ./labcmd.sh stop Lab168

關閉 Lab168 所有虛擬電腦
-----------------------------
vyatta254 已關機
}}}

{{item1{移除 Lab168 虛擬教學模組}}}
{{{
$ sudo ./labcmd.sh delete -f Lab168

=> 開始刪除 Lab168/ 虛擬網路 (Virtual Network)

=> 開始刪除 Lab168/ 虛擬電腦 (Virtual Machine)

刪除 vyatta254 虛擬電腦成功
刪除 vyatta254.qcow2 虛擬電腦硬碟檔成功
}}}

<<toBalaNotes "2">>



///%1
//%/

///%2
//%/
''參考文章''
1. SquashFS HOWTO
http://tldp.org/HOWTO/html_single/SquashFS-HOWTO/

{{item1{手動設定}}}

''1.掛載 FloppyFW 磁碟映像檔''
{{{
$ sudo mount -t msdos -o loop floppyfw.img /media/floppy/
}}}

@@color:red;[注意] 虛擬電腦要先關機@@

''2. 修改 FloppyFW 的 TCP/IP 網路設定''

''$ nano /media/floppy/config''  
{{{
#
# Configuration for floppyfw

# $Id: config,v 1.12 2005/09/24 17:50:01 thomasez Exp $
                                     :
#=============================================================================
#
# Basic configuration:
#

# Select the type of your OUTSIDE connection:

# CONNECT_TYPE=PPP	# if you use a modem

# CONNECT_TYPE=PPPoE    # if you use some type of DSL that uses PPPoE (Most of them do)

# 下面設定可使對外網卡從 DHCP Server 取得 IP, 對內網卡可以使用靜態 IP
CONNECT_TYPE=DHCP       # For getting the IP address by DHCP, often used on cable modems. 

# 下面設定迫使對外網卡使用靜態 IP
#CONNECT_TYPE=STATIC   # For the good old LAN/WAN connections with static IP addresses on the outside.

# CONNECT_TYPE=EXTERNAL # is a hook for booting a external startup
                        # script from a package.
                        # the external script has to be in /etc/ext-up.init

                                            :
#
# Physical device layout:
#
# In the files /etc/outside.info and /etc/inside.info you will see 
# OUTSIDE_DEVICE and INSIDE_DEVICE, those are the logical devices used by
# firewall.init and friends..
#
# If you have only one network card you can set INSIDE_DEV to eth0:0
# and it will use ip aliasing to make it all work.
#
# You can also use device.VLAN-ID to automatically set it as a VLAN.
#
OUTSIDE_DEV=eth0 
INSIDE_DEV=eth1
#
# Configuration for floppyfw
# Outside network:

OUTSIDE_IP= 10.37.72.163

#OUTSIDE_IP=DHCP
#OUTSIDE_IP=EXTERNAL
#
# The external script can requires you to add your username and password, # as the PPPoE package does. This is where you can add that.

USER_IDENT=
USER_PASSWORD=

# eth0 default device.
#

OUTSIDE_DEV=eth0
OUTSIDE_NETMASK=255.255.255.240
OUTSIDE_NETWORK=10.37.72.160
OUTSIDE_BROADCAST=10.37.72.175

#
# eth1 is the default device for the internal network.
#

INSIDE_IP=176.16.1.100
INSIDE_DEV=eth1
INSIDE_NETMASK=255.255.255.0
INSIDE_NETWORK=176.16.1.0
INSIDE_BROADCAST=176.16.1.255

#
# Misc
# These are not necessary to set if you are using DHCP.
#

DEFAULT_GATEWAY=10.37.72.165
NAME_SERVER_IP1=192.168.1.10
NAME_SERVER_IP2=192.168.1.34
DOMAIN=techrepublic.com
HOSTNAME=tr-admin

#
# (y)es or (n)o
#

OPEN_SHELL=y
ONLY_8M=n

#
# Turning on syslogd and klogd.
# This is a nice thing but will eat CPU which is why it is turned
# off by default.
#

USE_SYSLOG=y
SYSLOG_FLAGS="-m 360"

# If you don't like the -- MARK -- messages:
#SYSLOG_FLAGS="-m 0"
}}}

''3. 卸載 FloppyFW 磁碟映像檔''
{{{
$ sudo umount /media/floppy/
}}}

{{item1{程式自動設定}}}

''執行 setffw.sh 程式, 會自動掛載 image 檔, 修改 config 設定檔後, 會自動卸載 image 檔''
{{{
$ setffw.sh  -i  ffwNAT
}}}

{{item1{關閉 防火牆 功能}}}
{{{
$ nano /media/floppy/firewall.ini

#!/bin/sh

# $Id: firewall.ini 120 2006-09-21 22:17:36Z root $

# If you want the box to just act as a router, uncomment the 2 lines below
# echo 1 > /proc/sys/net/ipv4/ip_forward
exit 0                                                          # 移除此行備註, 關閉 防火牆 功能

#
# Firewall setup.
#
. /etc/config

#
# Do you want to do port forwaring to an internal server?
# Set the server IP here and sort out the port stuff later in this file.
#
SERVER_IP=192.168.122.10

                                       :
                                       :
}}}
<<toBalaNotes "1">>

{{item1{setffw.sh 原始碼}}}
{{{
#!/bin/bash
# setffw.sh — loop-mount a floppyfw disk image, edit one of its
# configuration files with nano, then unmount the image again.
#
# Usage: setffw [-i | -n] imagefile
#   -i  edit "config"      inside /var/lib/libvirt/images/<imagefile>.img
#   -n  edit "network.ini" inside /var/lib/libvirt/images/<imagefile>.img
#
# Exit status: 0 on success (or missing image), 1 on bad arguments.

[ -z "$1" ] && echo "使用格式 :setffw [-i | -n] imagefile" && exit 1
[ -z "$2" ] && echo "請給 image 檔名" && exit 1

# Map the option to the file to edit; reject anything else.
case "$1" in
     '-i')
          target="config"
          ;;
     '-n')
          target="network.ini"
          ;;
     *)
          echo "不認識的參數"
          exit 1
          ;;
esac

# Quote the image path so filenames containing spaces do not word-split.
img="/var/lib/libvirt/images/$2.img"
if [ -f "$img" ]; then
     mount -t msdos -o loop "$img" /media/floppy/
     nano "/media/floppy/$target"
     umount /media/floppy
else
     echo "image 檔不存在"
fi

exit 0
}}}

{{item1{SquashFS}}}
維基網址 : http://en.wikipedia.org/wiki/SquashFS

///%1
//%/
''參考文章''
1. Vyatta Internet Gateway Router Howto - VPN 
http://www.sonoracomm.com/support/19-inet-support/251-vyatta-vpn

{{item1{The Vyatta CLI and the System Shell}}}

''命令操作模式''
There are two command modes in the Vyatta CLI: operational mode and configuration mode.
''- Operational mode'' provides access to operational commands for showing and clearing information and enabling or disabling debugging, as well as commands for configuring terminal settings, loading and saving configuration, and restarting the system.

''- Configuration mode'' provides access to commands for creating, modifying, deleting, committing and showing configuration information, as well as commands for navigating through the configuration hierarchy.

When you log on to the system, the system is in ''operational mode''.
- To enter configuration mode from operational mode, issue the ''configure'' command.
- To return to operational mode from configuration mode, issue the ''exit'' command. If there are uncommitted configuration changes, you must either commit the changes using the commit command, or enter exit discard to discard the changes before you can exit to operational mode.

Issuing the ''exit'' command in operational mode logs you out of the system.

''Enter configuration mode''
In configuration mode you can set, delete, and show information. Enter configuration mode by
typing configure at the command prompt in operational mode.
{{{
vyatta@vyatta:~$ configure
[edit]
vyatta@vyatta#
}}}

''Exit configuration mode''
Exiting configuration mode returns you to operational mode.
{{{
vyatta@vyatta# exit
exit
vyatta@vyatta:~$
}}}
<<toBalaNotes "1">>

{{item1{TCP/IP 網路設定}}}

''1. 設定 IP 位址''
Configure interface eth0 with an IP address of 192.168.122.22 and a prefix length of 24. This will allow the system to reach the DNS server and default gateway configured in subsequent steps.
{{{
vyatta@vyatta:~$ configure
vyatta@R1# set interfaces ethernet eth0 address 192.168.122.22/24
[edit]
vyatta@R1# commit
[edit]
vyatta@vyatta# save
Saving configuration to '/opt/vyatta/etc/config/config.boot'...
Done
}}}

''2. 指定 DNS Server''
In our example, the DNS server is at IP address 168.95.1.1 Add the DNS server using the
set system name-server command.
{{{
vyatta@R1# set system name-server  168.95.1.1
[edit]
vyatta@R1# commit
vyatta@vyatta# save
Saving configuration to '/opt/vyatta/etc/config/config.boot'...
Done

}}}

''3. 指定 Default Gateway''
Add the default gateway using the set system gateway-address command.
{{{
vyatta@R1# set system gateway-address 192.168.122.1
[edit]
vyatta@R1# commit
[edit]
vyatta@vyatta# save
Saving configuration to '/opt/vyatta/etc/config/config.boot'...
Done
vyatta@vyatta# exit
}}}

''4. 顯示網卡設定''
{{{
vyatta@vyatta# show interfaces
 ethernet eth0 {
     address 192.168.122.22/24
     hw-id 52:54:00:12:34:56
 }
 ethernet eth1 {
     address 192.168.66.10/24
 }
 loopback lo {
 }
[edit]
}}}

<<toBalaNotes "2">>

{{item1{啟動 Vyatta 遠端管理 (SSH、HTTP 等)}}}

''1. 啟動網站管理服務''
{{{
$configure

[edit]
# set service https
[edit]
root@GWVT100# commit
[ service https ]
Generating a 1024 bit RSA private key
.................++++++
........++++++
writing new private key to '/etc/lighttpd/server.pem'
-----

[ service https ]
Stopping web server: lighttpd.
Starting web server: lighttpd.
Stopping PAGER server
Starting PAGER server

[edit]
root@GWVT100# save
Saving configuration to '/config/config.boot'...
Done
[edit]
}}}

之後就可以用網頁介面,  登入管理 Vyatta 
{{{
https://x.x.x.x   (Vyatta 的IP) 
}}}

''[註]'' 如使用 Vyatta 社群版, 沒有提供此功能, 請看以下訊息
{{{
The Vyatta web-based management interface is available in the Subscription Edition of the Vyatta Network OS. Please contact Vyatta if you wish to obtain a license. 
}}}

''2. 啟動 SSH/Telnet 服務''
{{{
#set service ssh port 22
#set service telnet port 23
}}}

<<toBalaNotes "3">>

{{item1{直接修改設定}}}
{{{
$ sudo nano /config/config.boot  
interfaces {
    ethernet eth1 {
        address 192.168.66.254/25
        duplex auto
        hw-id 52:54:00:66:fe:63
        speed auto
    }
    ethernet eth0 {
        address 192.168.100.66/24
        duplex auto
        hw-id 52:54:00:a0:66:63
        speed auto
    }
    loopback lo {
    }
}
system {
    config-management {
        commit-revisions 20
    }
    console {
        device ttyS0 {
            speed 9600
        }
    }
    name-server 168.95.1.1
    gateway-address 192.168.122.1
    host-name GW166
    login {
        user vyatta {
            authentication {
                encrypted-password $1$HZ71C4bm$Gg42Wxzdg8uineJSenNWc.
            }
            level admin
        }
    }
    ntp {
        server 0.vyatta.pool.ntp.org {
        }
        server 1.vyatta.pool.ntp.org {
        }
        server 2.vyatta.pool.ntp.org {
        }
    }
    package {
        auto-sync 1
        repository community {
            components main
            distribution stable
            password ""
            url http://packages.vyatta.com/vyatta
            username ""
        }
    }
    syslog {
        global {
            facility all {
                level notice
            }
            facility protocols {
                level debug
            }
        }
    }
    time-zone GMT
}


/* Warning: Do not remove the following line. */
/* === vyatta-config-version: "dhcp-server@4:content-inspection@3:webproxy@1:wanloadbalance@3:cluster@1:nat@3:webgui@1:config-management@1:quagga@2:qos@1:vrrp@1:ipsec@3:system@4:dhcp-relay@1:firewall@4:conntrack-sync@1:zone-policy@1" === */
/* Release version: VC6.3-2011.07.21 */

}}}

<<toBalaNotes "4">>

{{item1{啟動 Vyatta NAT 功能}}}

''1. 設定服務''
{{{
$ configure
[edit]
# edit service nat rule 1

[edit service nat rule 1]
#set source address 192.168.100.0/24  (表示來源,即LAN網路的區段)
#set outbound-interface eth0     (出去的網卡介面,WAN網路)
#set type masquerade              (轉換的型態)
}}}

''2. 確認設定''
{{{
# commit
}}}

''3. 儲存設定''
{{{
# save 
}}}

<<toBalaNotes "5">>

{{item1{還原最初設定}}}
{{{
# load /opt/vyatta/etc/config.boot.default
# save
}}}

///%1
//%/

///%2
//%/

///%3
//%/

///%4
//%/

///%5
//%/
__{{item1{文章建立與 Wiki 語法}}}__
1. 點選工具列中的 [''新增文章''] 按鈕, 開始編輯文章

2. 輸入以下文字, 輸入完成後, 請點選 [''完成''] 按鈕
{{{
我的第一篇文章
|Item1|Item2|
}}}
3. 結果如下 : 

我的第一篇文章
|Item1|Item2|

4. 點選工具列中的 [''儲存變更''] 按鈕, 儲存新增文章

''[注意]'' 切勿點選瀏覽器 (IE, Firefox,..) 的 [檔案][另存新檔] 按鈕, 儲存整個網頁. 完整 Wiki 語法請參考 [[Tiddlywiki 練功坊|http://tiddlywiki.tbala.net]]

__{{item1{文章的刪除}}}__
1. 點選欲刪除的文章

2. 點選 [編輯] 按鈕, 進入編輯模式

3. 點選 [刪除] 按鈕, 然後點選 [確定] 按鈕, 進行刪除

<<toBalaNotes "wiki">>


///%wiki
//%/

''參考文章''
1. 雲端運算的儲存基礎架構 - 揭開雲端儲存的面貌 ''(必讀)''
http://www.runpc.com.tw/content/cloud_content.aspx?id=105324
2. Drobo 11月新推iSCSI SAN內建自動分層儲存 
http://www.ithome.com.tw/itadm/article.php?c=69421
3. Dell 中階儲存系統新增自動分層 
http://www.ithome.com.tw/itadm/article.php?c=69784
4. Anatomy of a cloud storage infrastructure
http://www.ibm.com/developerworks/cloud/library/cl-cloudstorage/
5. EMC 率先推出伺服器端 I/O 快取卡  ''(必讀)''
http://www.ithome.com.tw/itadm/article.php?c=72360

{{item1{雲端運算的儲存基礎架構}}}
談到雲端儲存(Cloud storage),簡單來說,就是將儲存資源放到網路上供人存取的一種新興服務。如此一來,使用者可以在任何時間、任何地方,透過任何可連網的裝置方便地存取資料。若方案供應商能進一步確保資料的安全無虞,同時又提供許多資料檢索及管理的功能,使用者又何必不定期地花錢購買、安裝、設定或擴充儲存設備呢?尤其對於定期會有龐大資料備份需求的使用者或企業來說,設備的管理及擴充絕對是一大夢魘及負擔。

就一般使用者而言,雲端儲存及類似方案似乎處處可見。值得注意的趨勢,就是雲端儲存所支援的存取裝置也從桌上電腦主機,慢慢擴展到手機等行動裝置上。換句話說,必須透過電腦上網存取資料的時代已然過去,機動性更強的手機提供更具彈性的雲端資料存取方案。當前甚至有雲端音樂串流服務-ZumoDrive 的推出,iPod/iPhone 的使用者可以事先將音樂丟到線上儲存空間中,然後再透過無線網路播放音樂串流,相當方便。

{{item1{自動分層儲存技術走入主流應用}}}
利用分層概念建置儲存系統,並按存取頻率,動態地讓資料在不同層級儲存裝置上,自動遷移存放位置,企圖在效能與成本間取得最佳平衡

分層儲存概念由來已久,可在不同等級儲存設備間自動搬移資料階層式儲存管理(Hierarchical Storage Management)產品,問世亦有十多年時間。

早期的HSM產品多半採用獨立部署的軟體形式,而且是以面對應用程式的方式運作。後來Compellent、3PAR等廠商則推出了整合在磁碟陣列控制器中的自動分層儲存功能,可作為儲存設備底層的基礎服務,免除另外安裝軟體的需要,還能提供更精細的管理功能。

近年來隨著高性能固態儲存裝置(SSD)的發展,進一步推動了自動分層儲存技術的需求,就連儲存界龍頭廠商EMC,亦在不久前推出了可適用於旗下主力儲存產品線的FAST全自動分層儲存技術,讓自動分層儲存技術走出過去僅針對特定領域、或新創公司產品的局面,開始進入主流廠商產品線中。

''目的是兼顧效能與成本''
階層式儲存管理的目的,是利用不同效能/價格的儲存設備組成儲存系統,藉以分別適應前端不同應用的需求,讓關鍵應用使用高效能儲存裝置,非關鍵應用則使用效能較低但也較廉價的儲存裝置,以便在控制儲存成本的同時,又能確保關鍵應用可獲得必要的效能。

顯然的,要讓階層式儲存管理發生效用,關鍵在於如何為前端不同需求的應用,適當地分配由不同層級儲存設備提供的空間,特別是必須因應前端存取行為的變化,持續調整後端儲存空間配置,以便讓儲存空間利用效率,始終維持在一定的水準上。

[img[img/hfs01.jpg]]

{{item1{CDMI (Cloud Data Management Interface)}}}
SNIA 官方網址 : http://www.snia.org/cloud

網路存儲工業協會 (SNIA) 今天在 SNW 大會上,宣佈了雲存儲倡議 (CSI,Cloud Storage Initiative),這是一項新計劃,意在推動雲數據管理介面 (CDMI) 標準的採納和發展。據 SNIA 主席 Wayne Adams 稱,CIS 將用於SNIA的雲存儲技術工作組(TWG)。該小組去年春天宣佈成立,現已擁有140多名成員。該 CSI 將與TWG 一起規範雲存儲技術發展的國際標準。SNIA 目前正在關注的一個問題之一就是,在技術發展之前,基於專業介面的雲存儲被用戶接受的可能性,以及數據訪問的便攜性、法規遵從和安全標準。Adams 表示,雲存儲領域目前仍然在上演著市場爭奪戰。廠商會繼續擴大產品及服務供應規模。SNIA 內部與雲存儲相關的事務將由 CSI 進行組織協調,包括對培訓、技術開發、業務開發、市場行銷和實施。SNIA 組織的第一項議程是 CDMI 草案。SNIA 雲存儲、NDMP 和XAM SDK 技術工作組主席 Mark Carlson (他同時也是 SNIA 技術委員會成員)表示,總的來說 CDMI 是置於文件系統頂部可以模擬雲的一段代碼 。

該代碼需要 BSD 授權協議,Carlson 希望可以推動開發者協助 SNIA 制定相關標準。借助 CDMI,可以為雲計算應用創建、提供和配置存儲空間。它還能夠創建一條你輸入雲端數據的管理路徑。它可以幫助您確保雲中數據的備份、壓縮、重復數據刪除、加密等。最近首份 CDMI 草案已經發佈到網上,且公眾可對其發表評論。Carlson 說,SNIA 組織正朝著 1.0 版本努力,爭取明年發佈。CSI 的創始成員包括 Actifio,Bycast,EMC,Hitachi,HP,LSI,NetApp,Olocity,Sun,賽門鐵克和 Xiotech。

<<toBalaNotes "csi">>


///%csi
//%/

{{item1{根據核心參數, 自動設定 TCP/IP 網路}}}

''1. 撰寫程式''
{{{
$ cd ~/tsc532
$ sudo nano opt/bootlocal.sh 
# Read the kernel boot command line (set with: kvm -append "...").
k=$(cat /proc/cmdline)

# Extract ipv4=IP:MASK:GATEWAY:DNS only when the parameter is present.
# Without this guard, ${k##*ipv4=} returns the WHOLE command line when
# ipv4= is missing, the emptiness test below would pass, and eth0 would
# be configured with garbage values.
case "$k" in
   *ipv4=*) ipv4=${k##*ipv4=} ; ipv4=${ipv4%% *} ;;
   *)       ipv4="" ;;
esac

# Same guard for the nat= parameter (extracted for later use; not
# consumed in this script).
case "$k" in
   *nat=*) nat=${k##*nat=} ; nat=${nat%% *} ;;
   *)      nat="" ;;
esac

if [ ! -z "$ipv4" ]; then
   # Fields: IP:NETMASK:GATEWAY:DNS
   ip=$(echo "$ipv4" | cut -d':' -f1)
   mask=$(echo "$ipv4" | cut -d':' -f2)
   gw=$(echo "$ipv4" | cut -d':' -f3)
   dns=$(echo "$ipv4" | cut -d':' -f4)

   ifconfig eth0 "$ip" netmask "$mask" up
   # Only touch the default route / resolv.conf when the optional
   # gateway and DNS fields were actually supplied.
   [ -n "$gw" ] && route add default gw $gw
   [ -n "$dns" ] && echo "nameserver $dns" > /etc/resolv.conf
fi
}}}

''2. 重製 initial RAM disk 檔案 (tsc532.gz)''
{{{
$ find | sudo cpio -o -H newc | gzip -2 > ../tsc532.gz 
}}}

{{item1{測試系統}}}
{{{
$ kvm -name "tsc532" -m 128 -kernel ../vmlinuz -initrd ../tsc532.gz -append "nodhcp ipv4=192.168.0.1:255.255.255.0:192.168.0.254:168.95.1.1"
}}}

{{item1{在終端機文字模式, 啟動虛擬電腦}}}
{{{
cd ~/tsc532
$ kvm -name "tsc532" -m 128 -kernel ../vmlinuz -initrd ../tsc532.gz -nographic -curses
}}}

''-nographic :'' Normally, QEMU uses SDL to display the VGA output. With this option, you can totally disable graphical output so that QEMU is a simple command line application. The emulated serial port is redirected on the console. Therefore, you can still use QEMU to debug a Linux kernel with a serial console.

''-curses :'' Normally, QEMU uses SDL to display the VGA output. With this option, QEMU can display the VGA output when in text mode using a curses/ncurses interface. Nothing is displayed in graphical mode.
<<toBalaNotes "1">>
{{item1{根據核心參數, 自動設定雙網卡}}}

''1. 撰寫程式''
{{{
$ cd ~/tsc532

# 程式中請勿輸入中文, 因 Tiny Server Core 系統無法處理中文
$ sudo nano opt/bootsync.sh
#!/bin/sh
# bootsync.sh - configure one or two NICs from kernel boot parameters.
# Expected parameters (passed via the kernel command line / kvm -append):
#   ipv4=IP:MASK:GW:DNS[,IP:MASK:GW:DNS]  static config (or "dhcp") for eth0[,eth1]
#   nat=true                              enable NAT masquerading out of eth0
# Do not put non-ASCII text in this file (Tiny Server Core limitation).

echo "wait for 5 sec"
sleep 5

# Rebuild the login banner and message of the day from scratch.
echo "" > /etc/issue
echo -e "Tiny Server Core 2.0 (Philosopher's Stone)" > /etc/motd
echo -e "-------------------------------------------" >> /etc/motd

echo "alias ping='ping -c 4'" >>/etc/profile
echo "alias bye='sudo poweroff'" >>/etc/profile

# Kernel boot command line.
k=$(cat /proc/cmdline)

# Pull out the ipv4= parameter; without the grep guard, the parameter
# expansion below would yield the whole command line when ipv4= is absent.
# NOTE: ">/dev/null 2>&1" replaces the bash-only "&>/dev/null", which
# /bin/sh (BusyBox ash) parses as "run in background, then open /dev/null".
echo $k | grep 'ipv4=' >/dev/null 2>&1
if [ "$?" = "0" ]; then
   ipv4=${k##*ipv4=}
   ipv4=${ipv4%% *}
else
   ipv4=""
fi

if [ "$ipv4" != "" ]; then

   # Two comma-separated values mean two interfaces: eth0,eth1.
   # Initialize both so a stale environment value cannot leak into if2.
   if1=""
   if2=""
   echo "$ipv4" | grep "," >/dev/null 2>&1
   if [ "$?" = "0" ]; then
      if1=$(echo "$ipv4" | cut -d',' -f1)
      if2=$(echo "$ipv4" | cut -d',' -f2)
   else
      if1="$ipv4"
   fi

   # ----- eth0 -----
   if [ ! -z "$if1" ]; then
      if [ "$if1" != "dhcp" ]; then
         # Static setup; fields are IP:MASK:GATEWAY:DNS (last two optional).
         ip=$(echo "$if1" | cut -d':' -f1)
         mask=$(echo "$if1" | cut -d':' -f2)
         gw=$(echo "$if1" | cut -d':' -f3)
         dns=$(echo "$if1" | cut -d':' -f4)

         echo "[eth0]" >> /etc/motd
         ifconfig eth0 "$ip" netmask "$mask" up 
         [ "$?" = "0" ] && echo "  IP : $ip" >> /etc/motd
         [ "$gw" != "" ] && route add default gw $gw && echo "  Gateway : $gw" >> /etc/motd
         [ "$dns" != "" ] && echo "nameserver $dns" > /etc/resolv.conf && echo "  DNS : $dns" >> /etc/motd

      else
         udhcpc -i eth0 -n >/dev/null 2>&1
         if [ "$?" = "0" ]; then
            echo "[eth0]" >> /etc/motd
            echo "  DHCP : enable" >> /etc/motd
         else
            echo "[eth0]" >> /etc/motd
            echo "  DHCP : failure" >> /etc/motd
         fi
      fi
   fi

   # ----- eth1 -----
   if [ ! -z "$if2" ]; then
      if [ "$if2" != "dhcp" ]; then
         ip=$(echo "$if2" | cut -d':' -f1)
         mask=$(echo "$if2" | cut -d':' -f2)
         gw=$(echo "$if2" | cut -d':' -f3)
         dns=$(echo "$if2" | cut -d':' -f4)

         echo "[eth1]" >> /etc/motd
         ifconfig eth1 "$ip" netmask "$mask" up 
         # With a second NIC up, enable IP forwarding (router role).
         [ "$?" = "0" ] && echo 1 > /proc/sys/net/ipv4/ip_forward && echo "  IP : $ip" >> /etc/motd
         [ "$gw" != "" ] && route add default gw $gw && echo "  Gateway : $gw" >> /etc/motd
         [ "$dns" != "" ] && echo "nameserver $dns" > /etc/resolv.conf && echo "  DNS : $dns" >> /etc/motd

      else
         # BUG FIX: the original ran "udhcpc -i eth0" here, re-running
         # DHCP on the first NIC; this branch configures eth1.
         udhcpc -i eth1 -n >/dev/null 2>&1
         if [ "$?" = "0" ]; then
            echo "[eth1]" >> /etc/motd
            echo "  DHCP : enable" >> /etc/motd
         else
            echo "[eth1]" >> /etc/motd
            echo "  DHCP : failure" >> /etc/motd
         fi
      fi
   fi
fi

echo "" >> /etc/motd

# nat=true turns on masquerading (requires the iptables.tcz extension).
# grep output is suppressed here, consistent with the ipv4 check above.
echo $k | grep 'nat=' >/dev/null 2>&1
if [ "$?" = "0" ]; then
   nat=${k##*nat=}
   nat=${nat%% *}
   if [ "$nat" = "true" ]; then
      if [ -f  /usr/local/sbin/iptables ]; then
         iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE 
         [ "$?" = "0" ] && echo -e "NAT : Enable\n" >> /etc/motd
      else
         echo "need iptables.tcz" >> /etc/motd
      fi
   fi
fi

sleep 2
clear
/usr/bin/sethostname tsc532 >/dev/null 2>&1
# Chain into the per-boot local script in the background.
/opt/bootlocal.sh &
}}}

''2. 重製 initial RAM disk 檔案''
{{{
$ find | sudo cpio -o -H newc | gzip -2 > ../tsc532.gz 
}}}

{{item1{測試系統}}}
在核心參數中, 輸入 nodhcp 參數, 取消 DHCP Client 功能, 然後使用 ipv4 參數, 設定雙網卡的 IP 位址
{{{
$ kvm -name "tsc532" -m 128 -kernel ../vmlinuz -initrd ../tsc532.gz -net nic,macaddr="52:54:00:bb:bb:bb" -net nic,macaddr="52:54:00:aa:aa:aa" -append "nodhcp ipv4=192.168.0.1:255.255.255.0:192.168.0.254:168.95.1.1,192.168.10.254:255.255.255.0::"
}}}
<<toBalaNotes "2">>


///%1
//%/

///%2
//%/
''參考文章''
1. Common Android Virtual Device Configurations 
http://mobile.tutsplus.com/tutorials/android/common-android-virtual-device-configurations/

Android Virtual Devices 網址 : http://developer.android.com/guide/developing/tools/avd.html
Android Emulator 網址 : http://developer.android.com/guide/developing/tools/emulator.html

{{item1{認識 AVD}}}
''Android Virtual Devices (AVDs)'' are configurations of emulator options that let you better model an actual device.

Each AVD is made up of:

* ''A hardware profile''.  You can set options to define the hardware features of the virtual device. For example, you can define whether the device has a camera, whether it uses a physical QWERTY keyboard or a dialing pad, how much memory it has, and so on.
* ''A mapping to a system image''.  You can define what version of the Android platform will run on the virtual device. You can choose a version of the standard Android platform or the system image packaged with an SDK add-on.
* ''Other options''.  You can specify the emulator skin you want to use with the AVD, which lets you control the screen dimensions, appearance, and so on. You can also specify the emulated SD card to use with the AVD.
* ''A dedicated storage area on your development machine'', in which is stored the device's user data (installed applications, settings, and so on) and emulated SD card.

You can create as many AVDs as you need, based on the types of devices you want to model and the Android platforms and external libraries you want to run your application on.

In addition to the options in an AVD configuration, you can also specify emulator command-line options at launch or by using the emulator console to change behaviors or characteristics at run time. For a complete reference of emulator options, please see the Emulator documentation.

The easiest way to create an AVD is to use the ''graphical AVD Manager'', which you can launch from Eclipse or from the command line using the android tool. The android tool is provided in the tools/ directory of the Android SDK. When you run the android tool without options, it launches the ''graphical AVD Manager''.

{{item1{Android 顯示規格}}}

[img[img/android/skinscreen.png]]

{{item1{開始建立 Android Virtual Device}}}

''1. 在 /root/android-sdk-linux_86/tools 目錄中, 快點二下 android 程式, 啟動 AVD Manager''

[img[img/android/avdnew01.png]]

[img[img/android/avdnew02.png]]

[img[img/android/avdnew03.png]]

[img[img/android/avdnew04.png]]

[img[img/android/avdnew05.png]]

<<toBalaNotes "1">>

{{item1{啟動 Android Virtual Device}}}

[img[img/android/avdstart01.png]]

[img[img/android/avdstart02.png]]

[img[img/android/avdstart03.png]]

<<toBalaNotes "2">>


///%1
//%/

///%2
//%/
{{item1{檢視 apache2.conf 檔案 (必讀)}}}
{{{
$ cat /etc/apache2/apache2.conf
#
# Based upon the NCSA server configuration files originally by Rob McCool.
#
# This is the main Apache server configuration file.  It contains the
# configuration directives that give the server its instructions.
# See http://httpd.apache.org/docs/2.2/ for detailed information about
# the directives.
#
# Do NOT simply read the instructions in here without understanding
# what they do.  They're here only as hints or reminders.  If you are unsure
# consult the online docs. You have been warned.
#
# The configuration directives are grouped into three basic sections:
#  1. Directives that control the operation of the Apache server process as a
#     whole (the 'global environment').
#  2. Directives that define the parameters of the 'main' or 'default' server,
#     which responds to requests that aren't handled by a virtual host.
#     These directives also provide default values for the settings
#     of all virtual hosts.
#  3. Settings for virtual hosts, which allow Web requests to be sent to
#     different IP addresses or hostnames and have them handled by the
#     same Apache server process.
#
# Configuration and logfile names: If the filenames you specify for many
# of the server's control files begin with "/" (or "drive:/" for Win32), the
# server will use that explicit path.  If the filenames do *not* begin
# with "/", the value of ServerRoot is prepended -- so "/var/log/apache2/foo.log"
# with ServerRoot set to "" will be interpreted by the
# server as "//var/log/apache2/foo.log".
#

### Section 1: Global Environment
#
# The directives in this section affect the overall operation of Apache,
# such as the number of concurrent requests it can handle or where it
# can find its configuration files.
#

#
# ServerRoot: The top of the directory tree under which the server's
# configuration, error, and log files are kept.
#
# NOTE!  If you intend to place this on an NFS (or otherwise network)
# mounted filesystem then please read the LockFile documentation (available
# at <URL:http://httpd.apache.org/docs-2.1/mod/mpm_common.html#lockfile>);
# you will save yourself a lot of trouble.
#
# Do NOT add a slash at the end of the directory path.
#
ServerRoot "/etc/apache2"

#
# The accept serialization lock file MUST BE STORED ON A LOCAL DISK.
#
#<IfModule !mpm_winnt.c>
#<IfModule !mpm_netware.c>
LockFile /var/lock/apache2/accept.lock
#</IfModule>
#</IfModule>

#
# PidFile: The file in which the server should record its process
# identification number when it starts.
# This needs to be set in /etc/apache2/envvars
#
PidFile ${APACHE_PID_FILE}

#
# Timeout: The number of seconds before receives and sends time out.
#
Timeout 300

#
# KeepAlive: Whether or not to allow persistent connections (more than
# one request per connection). Set to "Off" to deactivate.
#
KeepAlive On

#
# MaxKeepAliveRequests: The maximum number of requests to allow
# during a persistent connection. Set to 0 to allow an unlimited amount.
# We recommend you leave this number high, for maximum performance.
#
MaxKeepAliveRequests 100

#
# KeepAliveTimeout: Number of seconds to wait for the next request from the
# same client on the same connection.
#
KeepAliveTimeout 15

##
## Server-Pool Size Regulation (MPM specific)
##

# prefork MPM
# StartServers: number of server processes to start
# MinSpareServers: minimum number of server processes which are kept spare
# MaxSpareServers: maximum number of server processes which are kept spare
# MaxClients: maximum number of server processes allowed to start
# MaxRequestsPerChild: maximum number of requests a server process serves
<IfModule mpm_prefork_module>
    StartServers          5
    MinSpareServers       5
    MaxSpareServers      10
    MaxClients          150
    MaxRequestsPerChild   0
</IfModule>

# worker MPM
# StartServers: initial number of server processes to start
# MaxClients: maximum number of simultaneous client connections
# MinSpareThreads: minimum number of worker threads which are kept spare
# MaxSpareThreads: maximum number of worker threads which are kept spare
# ThreadsPerChild: constant number of worker threads in each server process
# MaxRequestsPerChild: maximum number of requests a server process serves
<IfModule mpm_worker_module>
    StartServers          2
    MinSpareThreads      25
    MaxSpareThreads      75
    ThreadLimit          64
    ThreadsPerChild      25
    MaxClients          150
    MaxRequestsPerChild   0
</IfModule>

# event MPM
# StartServers: initial number of server processes to start
# MaxClients: maximum number of simultaneous client connections
# MinSpareThreads: minimum number of worker threads which are kept spare
# MaxSpareThreads: maximum number of worker threads which are kept spare
# ThreadsPerChild: constant number of worker threads in each server process
# MaxRequestsPerChild: maximum number of requests a server process serves
<IfModule mpm_event_module>
    StartServers          2
    MaxClients          150
    MinSpareThreads      25
    MaxSpareThreads      75
    ThreadLimit          64
    ThreadsPerChild      25
    MaxRequestsPerChild   0
</IfModule>

# These need to be set in /etc/apache2/envvars   (內定執行 CGI 程式的 User 及 Group)
User ${APACHE_RUN_USER}
Group ${APACHE_RUN_GROUP}

#
# AccessFileName: The name of the file to look for in each directory
# for additional configuration directives.  See also the AllowOverride
# directive.
#

AccessFileName .htaccess

#
# The following lines prevent .htaccess and .htpasswd files from being
# viewed by Web clients.
#
<Files ~ "^\.ht">
    Order allow,deny
    Deny from all
    Satisfy all
</Files>

#
# DefaultType is the default MIME type the server will use for a document
# if it cannot otherwise determine one, such as from filename extensions.
# If your server contains mostly text or HTML documents, "text/plain" is
# a good value.  If most of your content is binary, such as applications
# or images, you may want to use "application/octet-stream" instead to
# keep browsers from trying to display binary files as though they are
# text.
#
DefaultType text/plain

#
# HostnameLookups: Log the names of clients or just their IP addresses
# e.g., www.apache.org (on) or 204.62.129.132 (off).
# The default is off because it'd be overall better for the net if people
# had to knowingly turn this feature on, since enabling it means that
# each client request will result in AT LEAST one lookup request to the
# nameserver.
#
HostnameLookups Off

# ErrorLog: The location of the error log file.
# If you do not specify an ErrorLog directive within a <VirtualHost>
# container, error messages relating to that virtual host will be
# logged here.  If you *do* define an error logfile for a <VirtualHost>
# container, that host's errors will be logged there and not here.
#
ErrorLog /var/log/apache2/error.log

#
# LogLevel: Control the number of messages logged to the error_log.
# Possible values include: debug, info, notice, warn, error, crit,
# alert, emerg.
#
LogLevel warn

# Include module configuration:  (會啟動 /etc/apache2/mods-enabled 目錄中所有模組)
Include /etc/apache2/mods-enabled/*.load
Include /etc/apache2/mods-enabled/*.conf

# Include all the user configurations:   (管理者主要設定檔)
Include /etc/apache2/httpd.conf

# Include ports listing
Include /etc/apache2/ports.conf

#
# The following directives define some format nicknames for use with
# a CustomLog directive (see below).
# If you are behind a reverse proxy, you might want to change %h into %{X-Forwarded-For}i
#
LogFormat "%v:%p %h %l %u %t \"%r\" %>s %O \"%{Referer}i\" \"%{User-Agent}i\"" vhost_combined
LogFormat "%h %l %u %t \"%r\" %>s %O \"%{Referer}i\" \"%{User-Agent}i\"" combined
LogFormat "%h %l %u %t \"%r\" %>s %O" common
LogFormat "%{Referer}i -> %U" referer
LogFormat "%{User-agent}i" agent

#
# Define an access log for VirtualHosts that don't define their own logfile
CustomLog /var/log/apache2/other_vhosts_access.log vhost_combined

# Include of directories ignores editors' and dpkg's backup files,
# see README.Debian for details.

# Include generic snippets of statements
Include /etc/apache2/conf.d/

# Include the virtual host configurations:
Include /etc/apache2/sites-enabled/
}}}

''@@color:red;[問題]@@'' 上述設定檔是否有設定, 網站根目錄所在的目錄 (/var/www) ?

{{item1{檢視  /etc/apache2/sites-enabled/000-default 設定檔 (網站根目錄, cgi-bin 目錄)}}}

這個設定檔, 定義 Apache 2 網站根目錄所在的目錄 (DocumentRoot /var/www), 還有定義 /cgi-bin 目錄, 執行 CGI 程式
{{{
<VirtualHost *:80>
	ServerAdmin webmaster@localhost

	DocumentRoot /var/www

	<Directory />
		Options FollowSymLinks
		AllowOverride None
	</Directory>

	<Directory /var/www/>
		Options Indexes FollowSymLinks MultiViews
		AllowOverride None
		Order allow,deny
		allow from all
	</Directory>

	ScriptAlias /cgi-bin/ /usr/lib/cgi-bin/
	<Directory "/usr/lib/cgi-bin">
		AllowOverride None
		Options +ExecCGI -MultiViews +SymLinksIfOwnerMatch
		Order allow,deny
		Allow from all
	</Directory>

	ErrorLog /var/log/apache2/error.log

	# Possible values include: debug, info, notice, warn, error, crit,
	# alert, emerg.
	LogLevel warn

	CustomLog /var/log/apache2/access.log combined

    Alias /doc/ "/usr/share/doc/"
    <Directory "/usr/share/doc/">
        Options Indexes MultiViews FollowSymLinks
        AllowOverride None
        Order deny,allow
        Deny from all
        Allow from 127.0.0.0/255.0.0.0 ::1/128
    </Directory>

</VirtualHost>
}}}

網站根目錄放置在 /var/www 這個目錄嗎? 為什麼呢?因為 DocumentRoot 這個設定值的關係啦!此外,由於 Apache 允許 Internet 對我們的資料進行瀏覽, 所以你當然必須要針對可被瀏覽的目錄進行權限的相關設定,那就是 <Directory> 這個設定值的重要特色!先讓我們來看看預設的主網頁設定吧!
{{{
DocumentRoot "/var/www"           放置首頁的目錄!
}}}
這個設定值重要到不行,因為他規範了 WWW 伺服器主網頁所放置的『目錄』所在。這個設定值是可以被隨意更動的,不過更動之後你必須要針對該目錄進行權限設定,也就是底下要講到的 Directory 這個設定值來規範目錄的權限才行喔!
{{{
<Directory />
    Options FollowSymLinks
    AllowOverride None
</Directory>
}}}
以最嚴格設定值, 設定系統根目錄 (/)

<<toBalaNotes "apache2">>


///%apache2
//%/
{{item1{Google Chromium 專案}}}
官方網站 : http://www.chromium.org/

The ''Chromium projects'' include ''Chromium'' and ''Chromium OS'', the open-source projects behind the Google Chrome  browser and Google Chrome OS, respectively. This site houses the documentation and code related to the Chromium projects and is intended for developers interested in learning about and contributing to the open-source projects.

{{item1{Chromium (專為設計 Chrome 瀏覽器而成立的專案)}}}
Chromium is an open-source browser project that aims to build a safer, faster, and more stable way for all Internet users to experience the web. This site contains design documents, architecture overviews, testing information, and more to help you learn to build and work with the Chromium source code.

''[註]'' 「Chromium」是個開放原始碼的瀏覽器計畫,而「Google Chrome 瀏覽器」則是依據此計畫發展而來的

{{item1{Chromium OS}}}
Chromium OS is an open-source project that aims to provide a fast, simple, and more secure computing experience for people who spend most of their time on the web.  Learn more about the project goals, obtain the latest build, and learn how you can get involved, submit code, and file bugs.

''Chromium OS Software Architecture Overview''
本文網址 : http://www.chromium.org/chromium-os/chromiumos-design-docs/software-architecture

* The Chromium-based browser and the window manager
* System-level software and user-land services: the kernel, drivers, connection manager, and so on
* Firmware 

[img[img/ChromiumOSOverview.png]]

<<toBalaNotes "chromium">>


///%chromium
//%/
''we can get a list of databases:''
{{{
# curl -X GET http://127.0.0.1:5984/_all_dbs
["_users"]
}}}

Let’s create a database:
{{{
# curl -X PUT http://127.0.0.1:5984/baseball
{"ok":true}
# curl -X GET http://127.0.0.1:5984/_all_dbs
["_users","baseball"]
}}}

To round things off, let’s delete the second database:
{{{
# curl -X DELETE http://127.0.0.1:5984/baseball
{"ok":true}
}}}

<<toBalaNotes "curl">>

本文網址 : http://helderribeiro.net/?p=40

So I jumped on the bandwagon and joined all the cool kids in scorning relational databases and playing with CouchDB  (for a sort of long and excellent wrap up on this versus that, read this). I installed  it, read through a lot of docs, thought I understood it pretty well and immediately started searching the tubes for what Ruby framework/library/gizmo would best allow me to get kickstarted with using it on a new project.

Turns out that was a bit premature, as my brain couldn’t really handle trying to model the domain of my intended application onto this totally new way of thinking so abruptly. It’s like when you’re a native Portuguese speaker and you’re drunk, trying to make yourself pass as speaking Spanish and you keep spilling out the German you’ve been learning for the past two years. I’m sure everyone can relate to that.

Time to take a step back.

The best way I found to get more familiar with this new type of database was to get rid of all the mental cruft I had around it. So I forgot about my app, its data model and the web framework and went on to play with the database alone.

I had seen this blog post about how to store hierarchical data in CouchDB and decided to play with the example data and views the guy provided (big thanks to Paul Bonser!). [Note: If you can follow that, you don't need to read this, as it's basically an expanded version of one of his examples.]

This is the data:


{
  "docs": [
    {"_id":"Food", "path":["Food"]},
    {"_id":"Fruit", "path":["Food","Fruit"]},
    {"_id":"Red", "path":["Food","Fruit","Red"]},
    {"_id":"Cherry", "path":["Food","Fruit","Red","Cherry"]},
    {"_id":"Tomato", "path":["Food","Fruit","Red","Tomato"]},
    {"_id":"Yellow", "path":["Food","Fruit","Yellow"]},
    {"_id":"Banana", "path":["Food","Fruit","Yellow","Banana"]},
    {"_id":"Meat", "path":["Food","Meat"]},
    {"_id":"Beef", "path":["Food","Meat","Beef"]},
    {"_id":"Pork", "path":["Food","Meat","Pork"]}
  ]
}

Which corresponds to this tree:

[img[img/couchdb/tree.png]]

To create a database and import this data, save that snippet as a file somewhere (I’ll use /tmp/data.json). CouchDB talks to the world in HTTP. We’re gonna use curl for that so you really see what’s going on. Web browsers are for wimps.

Usually CouchDB runs locally on 127.0.0.1, port 5984. To create a new database all you need to do is PUT to that address with the name you want your DB to have in the URL and no payload. We’ll save that URL in a variable because we’re gonna use it a lot.


DB="http://127.0.0.1:5984/hierarchical_data"
curl -v -X PUT $DB

Here -v means verbose, and -X lets you choose the HTTP method.

We have our database, now we import the data using the bulk document API. We specify the payload (data) with -d and feed it as a string from the file by prefixing its path with a @ (that’s one bash trick I didn’t know about!):


curl -v -d @/tmp/data.json -X POST $DB/_bulk_docs

And this is the view I was interested in (it lists all descendants of a node, including itself):


{
  "language": "javascript",
  "views": {
    "descendants": {
      "map": "
        function(doc) {
            for (var i in doc.path) {
                emit(doc.path[i], doc)
            }
        }"
    }
  }
}

This thing about views going through all objects in your database took a little time to sink in with me. Initially I thought the query took place in the view, that I would somehow pass the node from which I wanted the descendants as the doc argument to that function. That’s not how it works. The query actually takes place in the view parameters, and the view function itself only flattens everything out into a convenient array so you can query it better.

This view I just mentioned, for example, doesn’t actually give you the elements in a sub-tree. It goes through each object (document) in the database and adds it to the array of results once for each of its ancestors.

To see if for yourself, save it in a file somewhere (/tmp/view.json in my case) and add it to the database. We do that by creating a special design document:


curl -v -d @/tmp/view.json -X PUT $DB/_design/tree

Now, to run it, just execute:


curl -v -X GET $DB/_design/tree/_view/descendants

Or see it in the browser: http://localhost:5984/hierarchical_data/_design/tree/_view/descendants

This is what you get:


{"total_rows":29,"offset":0,"rows":[
{"id":"Banana","key":"Banana","value":""},
{"id":"Beef","key":"Beef","value":""},
{"id":"Cherry","key":"Cherry","value":""},
{"id":"Banana","key":"Food","value":""},
{"id":"Beef","key":"Food","value":""},
{"id":"Cherry","key":"Food","value":""},
{"id":"Food","key":"Food","value":""},
{"id":"Fruit","key":"Food","value":""},
{"id":"Meat","key":"Food","value":""},
{"id":"Pork","key":"Food","value":""},
{"id":"Red","key":"Food","value":""},
{"id":"Tomato","key":"Food","value":""},
{"id":"Yellow","key":"Food","value":""},
{"id":"Banana","key":"Fruit","value":""},
{"id":"Cherry","key":"Fruit","value":""},
{"id":"Fruit","key":"Fruit","value":""},
{"id":"Red","key":"Fruit","value":""},
{"id":"Tomato","key":"Fruit","value":""},
{"id":"Yellow","key":"Fruit","value":""},
{"id":"Beef","key":"Meat","value":""},
{"id":"Meat","key":"Meat","value":""},
{"id":"Pork","key":"Meat","value":""},
{"id":"Pork","key":"Pork","value":""},
{"id":"Cherry","key":"Red","value":""},
{"id":"Red","key":"Red","value":""},
{"id":"Tomato","key":"Red","value":""},
{"id":"Tomato","key":"Tomato","value":""},
{"id":"Banana","key":"Yellow","value":""},
{"id":"Yellow","key":"Yellow","value":""}
]}

As you can see, there was no view parameter in that call, and this looks nothing like a list of descendants. Each emit call is responsible for one line of the output, which contains the id of the doc object, a key and a value. [I replaced doc as the value in the original emit call with '' to make it more readable.]. Note that lines do not appear in the order they were emitted (otherwise you’d see lines with the same id grouped together). CouchDB automatically sorts them by key. Another thing you’ll notice is that all the lines whose keys have the same element are also a descendant of that element. Convenient, huh?

Now, to get the descendants of one particular node, just query the view with that node’s name in the key:


curl -v -X GET 'http://localhost:5984/hierarchical_data/_design/tree/_view/descendants?key="Fruit"'

Or, again, use the browser: http://localhost:5984/hierarchical_data/_design/tree/_view/descendants?key=%22Fruit%22

And bingo!


{"total_rows":29,"offset":13,"rows":[
{"id":"Banana","key":"Fruit","value":""},
{"id":"Cherry","key":"Fruit","value":""},
{"id":"Fruit","key":"Fruit","value":""},
{"id":"Red","key":"Fruit","value":""},
{"id":"Tomato","key":"Fruit","value":""},
{"id":"Yellow","key":"Fruit","value":""}
]}





///%curl
//%/
''參考文章''
1. Red Hat Enterprise Linux Server - KSM
http://docs.redhat.com/docs/en-US/Red_Hat_Enterprise_Linux/6/html/Virtualization_Administration_Guide/chap-KSM.html
2. Fedora - Features/KSM
http://fedoraproject.org/wiki/Features/KSM
3. 第 4章. 實體與虛擬記憶體 (必讀)
http://web.mit.edu/rhel-doc/4/RH-DOCS/rhel-isa-zh_tw-4/ch-memory.html

The concept of shared memory is common in modern operating systems. For example, when a program is first started it shares all of its memory with the parent program. When either the child or parent program tries to modify this memory, the kernel allocates a new memory region, copies the original contents and allows the program to modify this new region. This is known as ''copy on write''.

KSM is a new Linux feature which uses this concept in reverse. KSM enables the kernel to examine two or more already running programs and compare their memory. If any memory regions or pages are identical, KSM reduces multiple references to multiple identical memory pages to a single reference to a single page. This page is then marked copy on write. If the contents of the page is modified, a new page is created.

This is useful for virtualization with KVM. When a virtualized guest is started, it only inherits the memory from the parent qemu-kvm process. Once the guest is running the contents of the guest operating system image can be shared when guests are running the same operating system or applications. KSM only identifies and merges identical pages which does not interfere with the guest or impact the security of the host or the guests. KSM allows KVM to request that these identical guest memory regions be shared.
KSM provides enhanced memory speed and utilization. With KSM, common process data is stored in cache or in main memory. This reduces cache misses for the KVM guests which can improve performance for some applications and operating systems. Secondly, sharing memory reduces the overall memory usage of guests which allows for higher densities and greater utilization of resources.

Red Hat Enterprise Linux uses two separate methods for controlling KSM:

1. The ''ksm service'' starts and stops the KSM kernel thread.
2. The ''ksmtuned service'' controls and tunes the ksm, dynamically managing same-page merging. The ksmtuned service starts ksm and stops the ksm service if memory sharing is not necessary. ''The ksmtuned service must be told with the retune parameter to run when new virtualized guests are created or destroyed''.

Both of these services are controlled with the standard service management tools.

{{item1{The KSM service}}}
The ksm service is a standard Linux daemon that uses the KSM kernel features.
KSM is included in the qemu-common package, which is a dependency of KVM. KSM is enabled by default in Red Hat Enterprise Linux. When the ksm service is not started, KSM shares only 2000 pages. This default is low and provides limited memory saving benefits.
When the ksm service is started, KSM will share up to half of the host system's main memory. Start the ksm service to enable KSM to share more memory.
{{{
# service ksm start
Starting ksm:                                              [  OK  ]
}}}
The ksm service can be added to the default startup sequence. Make the ksm service persistent with the chkconfig command.
{{{
# chkconfig ksm on
}}}

{{item1{The KSM tuning service}}}
The ksmtuned service does not have any options. The ksmtuned service loops and adjusts ksm. The ksmtuned service is notified by libvirt when a virtualized guest is created or destroyed.
{{{
# service ksmtuned start
Starting ksmtuned:                                         [  OK  ]
}}}
The ksmtuned service can be tuned with the retune parameter. The retune parameter instructs ksmtuned to run tuning functions manually.
The /etc/ksmtuned.conf file is the configuration file for the ksmtuned service. The file output below is the default ksmtuned.conf file.
{{{
# Configuration file for ksmtuned.

# How long ksmtuned should sleep between tuning adjustments
# KSM_MONITOR_INTERVAL=60

# Millisecond sleep between ksm scans for 16Gb server.
# Smaller servers sleep more, bigger sleep less.
# KSM_SLEEP_MSEC=10

# KSM_NPAGES_BOOST=300
# KSM_NPAGES_DECAY=-50
# KSM_NPAGES_MIN=64
# KSM_NPAGES_MAX=1250

# KSM_THRES_COEF=20
# KSM_THRES_CONST=2048

# uncomment the following to enable ksmtuned debug information
# LOGFILE=/var/log/ksmtuned
# DEBUG=1
}}}
<<toBalaNotes "1">>
{{item1{KSM variables and monitoring}}}
KSM stores monitoring data in the /sys/kernel/mm/ksm/ directory. Files in this directory are updated by the kernel and are an accurate record of KSM usage and statistics.
The variables in the list below are also configurable variables in the /etc/ksmtuned.conf file as noted below.
The /sys/kernel/mm/ksm/ files
{{{
full_scans
    Full scans run.
pages_shared
    Total pages shared.
pages_sharing
    Pages presently shared.
pages_to_scan
    Pages not scanned.
pages_unshared
    Pages no longer shared.
pages_volatile
    Number of volatile pages.
run
    Whether the KSM process is running.
sleep_millisecs
    Sleep milliseconds.
}}}
KSM tuning activity is stored in the /var/log/ksmtuned log file if the DEBUG=1 line is added to the /etc/ksmtuned.conf file. The log file location can be changed with the LOGFILE parameter. Changing the log file location is not advised and may require special configuration of SELinux settings.
The /etc/sysconfig/ksm file can manually set a number or all pages used by KSM as not swappable.

Open the /etc/sysconfig/ksm file with a text editor.
{{{
# The maximum number of unswappable kernel pages
# which may be allocated by ksm (0 for unlimited)
# If unset, defaults to half of total memory
# KSM_MAX_KERNEL_PAGES=
}}}
Uncomment the KSM_MAX_KERNEL_PAGES line to manually configure the number of unswappable pages for KSM. Setting this variable to 0 configures KSM to keep all identical pages in main memory which can improve performance if the system has sufficient main memory.
{{{
# The maximum number of unswappable kernel pages
# which may be allocated by ksm (0 for unlimited)
# If unset, defaults to half of total memory
KSM_MAX_KERNEL_PAGES=0
}}}

{{item1{Deactivating KSM}}}
KSM has a performance overhead which may be too large for certain environments or host systems.
KSM can be deactivated by stopping the ksm service and the ksmtuned service. Stopping the services deactivates KSM but does not persist after restarting.
{{{
# service ksm stop
Stopping ksm:                                              [  OK  ]
# service ksmtuned stop
Stopping ksmtuned:                                         [  OK  ]
}}}
Persistently deactivate KSM with the chkconfig command. To turn off the services, run the following commands:
{{{
# chkconfig ksm off
# chkconfig ksmtuned off
}}}
<<toBalaNotes "2">>


///%1
//%/

///%2
//%/
''參考文章''
1. Linux Bridge With ‘brctl’ Tutorial
http://www.lainoox.com/bridge-brctl-tutorial-linux/
2.  [原創] Linux Bridge 下 清除 MAC Address Table
http://binaryhacking.blogspot.tw/2008/02/linux-bridge-mac-address-table.html
3. Set Up The Bridge
http://www.linuxdoc.org/HOWTO/BRIDGE-STP-HOWTO/set-up-the-bridge.html

{{item1{設定橋接網路裝置}}}

''1. 產生橋接網路裝置 (swhub)''
{{{
$ sudo brctl addbr swhub

$ ifconfig swhub
swhub     Link encap:Ethernet  HWaddr 2a:3f:01:f8:2b:e6  
          BROADCAST MULTICAST  MTU:1500  Metric:1
          RX packets:0 errors:0 dropped:0 overruns:0 frame:0
          TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:0 
          RX bytes:0 (0.0 B)  TX bytes:0 (0.0 B)

}}}

''2. 設定橋接網路裝置的 IP''
{{{
$ sudo ifconfig swhub 172.16.20.254 netmask 255.255.255.0 up

$ ifconfig swhub
swhub     Link encap:Ethernet  HWaddr 2a:3f:01:f8:2b:e6  
          inet addr:172.16.20.254  Bcast:172.16.255.255  Mask:255.255.255.0
          inet6 addr: fe80::283f:1ff:fef8:2be6/64 Scope:Link
          UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
          RX packets:0 errors:0 dropped:0 overruns:0 frame:0
          TX packets:26 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:0 
          RX bytes:0 (0.0 B)  TX bytes:6005 (6.0 KB)
}}}

<<toBalaNotes "1">>
{{item1{設定 TAP 網路介面}}}

''1. 產生二個 TAP 網路介面''
{{{
$ sudo tunctl -b -u student
tap0

$ sudo tunctl -b -u student
tap1
}}}

''2. 啟動新建 TAP 網路裝置''
{{{
$ sudo ifconfig tap0 up
$ ifconfig tap0
tap0      Link encap:Ethernet  HWaddr 06:1d:10:90:0c:29  
          UP BROADCAST MULTICAST  MTU:1500  Metric:1
          RX packets:0 errors:0 dropped:0 overruns:0 frame:0
          TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:500 
          RX bytes:0 (0.0 B)  TX bytes:0 (0.0 B)

$ sudo ifconfig tap1 up
$ ifconfig tap1
tap1      Link encap:Ethernet  HWaddr 6e:5f:65:68:41:48  
          UP BROADCAST MULTICAST  MTU:1500  Metric:1
          RX packets:0 errors:0 dropped:0 overruns:0 frame:0
          TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:500 
          RX bytes:0 (0.0 B)  TX bytes:0 (0.0 B)
}}}

{{item1{設定 Switch-HUB 網路裝置}}}
''1. 將 TAP 網路介面, 連接至橋接網路裝置''
{{{
$ sudo brctl addif swhub tap1
$ sudo brctl addif swhub tap0
}}}

''2. 檢視 swhub 橋接網路裝置的 MAC 表格''
{{{
$ sudo brctl showmacs swhub
port no	mac addr		is local?	ageing timer
  2	06:1d:10:90:0c:29	yes		   0.00
  1	6e:5f:65:68:41:48	yes		   0.00
}}}

swhub 橋接網路裝置, 會根據加入網卡裝置, 選其最小的 MAC 位址, 作為本身的 MAC 位址, 內容如下 :
{{{
$ ifconfig swhub
swhub     Link encap:Ethernet  HWaddr 06:1d:10:90:0c:29  
          inet addr:172.16.20.254  Bcast:172.16.255.255  Mask:255.255.0.0
          inet6 addr: fe80::384f:a4ff:fec7:5799/64 Scope:Link
          UP BROADCAST MULTICAST  MTU:1500  Metric:1
          RX packets:0 errors:0 dropped:0 overruns:0 frame:0
          TX packets:56 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:0 
          RX bytes:0 (0.0 B)  TX bytes:10545 (10.5 KB)
}}}

''3. 檢視 swhub 內部運作資訊''
{{{
$ sudo brctl showstp swhub
swhub
 bridge id		8000.061d10900c29
 designated root	8000.061d10900c29
 root port		   0			path cost		   0
 max age		  20.00			bridge max age		  20.00
 hello time		   2.00			bridge hello time	   2.00
 forward delay		  15.00			bridge forward delay	  15.00
 ageing time		 300.01
 hello timer		   0.36			tcn timer		   0.00
 topology change timer	   0.00			gc timer		  32.29
 flags			

tap0 (2)
 port id		8002			state		       disabled
 designated root	8000.061d10900c29	path cost		 100
 designated bridge	8000.061d10900c29	message age timer	   0.00
 designated port	8002			forward delay timer	   0.00
 designated cost	   0			hold timer		   0.00
 flags			

tap1 (1)
 port id		8001			state		       disabled
 designated root	8000.061d10900c29	path cost		 100
 designated bridge	8000.061d10900c29	message age timer	   0.00
 designated port	8001			forward delay timer	   0.00
 designated cost	   0			hold timer		   0.00
 flags			
}}}

上面的資訊在其它 Linux 系統, 可由以下命令得知
{{{
$ sudo brctl showbr swhub
}}}
''[註]'' Ubuntu 12.04 的 brctl 命令不提供 showbr 這參數

{{item1{顯示所有橋接網路裝置}}}
{{{
$ brctl show
bridge name	bridge id		STP enabled	interfaces
NET100		8000.525400be89e7	no		NET100-nic
NET99		8000.525400da18de	no		NET99-nic
swhub		8000.061d10900c29	no		tap0
							tap1
vbr660		8000.525400aa0660	no		vbr660-nic
vbr661		8000.525400aa0661	no		vbr661-nic
vbr88		8000.525400aa0088	no		vbr88-nic
virbr0		8000.fe5400a08dac	yes		vnet0
}}}

{{item1{啟動 DHCP Server}}}
{{{
$ sudo dnsmasq -u student --bind-interfaces --dhcp-leasefile=/tmp/dnsmasq172.log --conf-file= --listen-address 172.16.20.254 --dhcp-range 172.16.20.100,172.16.20.110 --dhcp-lease-max=10 --except-interface=lo --interface=swhub
}}}

''[註]'' DHCP Server 租用資訊存在 /tmp/dnsmasq172.log
{{item1{啟動虛擬電腦}}}

''1. 第一部 Client 虛擬電腦 (由 DHCP 取得 IP 位址)''
{{{
$ cd ~

$ kvm -name "tsc321" -m 128 -kernel vmlinuz -initrd tsc32.gz -net nic,macaddr=52:54:72:16:20:10 -net tap,ifname=tap0,script=no,downscript=no &
}}}

''2. 第二部 Client 虛擬電腦''
{{{
$ kvm -name "tsc322" -m 128 -kernel vmlinuz -initrd tsc32.gz -net nic,macaddr=52:54:72:16:20:11 -net tap,ifname=tap1,script=no,downscript=no -append "nodhcp ipv4=172.16.20.11:255.255.255.0:172.16.20.254:168.95.1.1" &
}}}

''3. 檢視 DHCP Server Log''
{{{
$ cat /tmp/dnsmasq172.log 
1364808318 52:54:72:16:20:10 172.16.20.106 box 01:52:54:72:16:20:10
}}}

{{item1{測通網路}}}

''1. 在 HOST OS 執行 ping 命令''
{{{
$ ping 172.16.20.10

$ ping 172.16.20.11

$ arp -n
Address                  HWtype  HWaddress           Flags Mask            Iface
172.16.20.11             ether   52:54:72:16:20:11   C                     swhub
172.16.20.10             ether   52:54:72:16:20:10   C                     swhub
}}}

{{item1{啟動 NAT}}}
{{{
$ sudo iptables -t nat -A POSTROUTING -s 172.16.20.0/24 -j MASQUERADE
}}}
<<toBalaNotes "2">>

{{item1{建立橋接網路裝置的專屬網卡 (意在指定專屬 MAC 位址)}}}

''1. 建立 TAP 網卡''
{{{
$ sudo tunctl -u student -t shub66-net
Set 'shub66-net' persistent and owned by uid 1000

# 新建 TAP 網卡 (shub66-net), 內定沒有 IP, 而且沒有啟動
$ ifconfig shub66-net
shub66-net Link encap:Ethernet  HWaddr da:e5:54:85:63:5a  
          BROADCAST MULTICAST  MTU:1500  Metric:1
          RX packets:0 errors:0 dropped:0 overruns:0 frame:0
          TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:500 
          RX bytes:0 (0.0 B)  TX bytes:0 (0.0 B)
}}}

''2. 指定新建 TAP 網卡的 MAC 位址''
所指定的 MAC 位址, 數字要小, 因橋接網路裝置, 會根據加入網卡裝置, 選其最小的 MAC 位址, 作為本身的 MAC 位址
{{{
$ sudo ifconfig shub66-net hw ether 52:54:00:00:00:66
$ ifconfig shub66-net
shub66-net Link encap:Ethernet  HWaddr 52:54:00:00:00:66  
          BROADCAST MULTICAST  MTU:1500  Metric:1
          RX packets:0 errors:0 dropped:0 overruns:0 frame:0
          TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:500 
          RX bytes:0 (0.0 B)  TX bytes:0 (0.0 B)
}}}

''3. 建立橋接網路裝置''
{{{
$ sudo brctl addbr shub66
$ ifconfig shub66
shub66    Link encap:Ethernet  HWaddr b2:d8:81:d9:53:69  
          BROADCAST MULTICAST  MTU:1500  Metric:1
          RX packets:0 errors:0 dropped:0 overruns:0 frame:0
          TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:0 
          RX bytes:0 (0.0 B)  TX bytes:0 (0.0 B)
}}}

''4. 橋接網路裝置加入專屬網卡''
{{{
$ sudo brctl addif shub66 shub66-net
$ ifconfig shub66
shub66    Link encap:Ethernet  HWaddr 52:54:00:00:00:66  
          BROADCAST MULTICAST  MTU:1500  Metric:1
          RX packets:0 errors:0 dropped:0 overruns:0 frame:0
          TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:0 
          RX bytes:0 (0.0 B)  TX bytes:0 (0.0 B)
}}}

''5. 指定橋接網路裝置的 IP, 並啟動''
橋接網路裝置的專屬 TAP 網卡不需要啟動
{{{
$ sudo ifconfig shub66 172.16.20.253/24 up
$ ifconfig shub66
shub66    Link encap:Ethernet  HWaddr 52:54:00:00:00:66  
          inet addr:172.16.20.253  Bcast:172.16.20.255  Mask:255.255.255.0
          UP BROADCAST MULTICAST  MTU:1500  Metric:1
          RX packets:0 errors:0 dropped:0 overruns:0 frame:0
          TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:0 
          RX bytes:0 (0.0 B)  TX bytes:0 (0.0 B)
}}}

''6. 加入新建虛擬電腦專屬 TAP 網卡, 並解決MAC 問題''
{{{
# 新建虛擬電腦專屬 TAP 網卡
$ sudo tunctl -u student 
Set 'tap0' persistent and owned by uid 1000

# 新建虛擬電腦專屬 tap0 網卡的 MAC 位址, 竟然比 shub66-net 的 MAC 位址來的小
# TAP 網卡的 MAC 位址是由亂數產生
$ ifconfig tap0
tap0      Link encap:Ethernet  HWaddr 16:e8:54:e3:21:73  
          BROADCAST MULTICAST  MTU:1500  Metric:1
          RX packets:0 errors:0 dropped:0 overruns:0 frame:0
          TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:500 
          RX bytes:0 (0.0 B)  TX bytes:0 (0.0 B)

# 將新建虛擬電腦專屬 tap0 網卡, 加入橋接網路裝置後, 果真改變橋接網路裝置 (shub66) 的 MAC 位址
$ sudo brctl addif shub66 tap0
$ ifconfig shub66
shub66    Link encap:Ethernet  HWaddr 16:e8:54:e3:21:73  
          inet addr:172.16.20.253  Bcast:172.16.20.255  Mask:255.255.255.0
          UP BROADCAST MULTICAST  MTU:1500  Metric:1
          RX packets:0 errors:0 dropped:0 overruns:0 frame:0
          TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:0 
          RX bytes:0 (0.0 B)  TX bytes:0 (0.0 B)

# 因上述的 MAC 問題, 解決之道就是改變 tap0 網卡的 MAC 位址, 將它改小
$ sudo ifconfig tap0 hw ether 52:54:72:16:20:13 

# 記得一定要啟動 tap0 這張網卡
$ sudo ifconfig tap0 up

# 再次檢視 shub66 的 MAC 位址
$ ifconfig shub66
shub66    Link encap:Ethernet  HWaddr 52:54:00:00:00:66  
          inet addr:172.16.20.253  Bcast:172.16.20.255  Mask:255.255.255.0
          UP BROADCAST MULTICAST  MTU:1500  Metric:1
          RX packets:0 errors:0 dropped:0 overruns:0 frame:0
          TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:0 
          RX bytes:0 (0.0 B)  TX bytes:0 (0.0 B)
}}}

''7. 測試橋接網路裝置''
{{{
$ kvm -name "tsc321" -m 128 -kernel vmlinuz64 -initrd tsc64.gz -net nic,macaddr=52:54:72:16:20:10 -net tap,ifname=tap0,script=no,downscript=no -append "nodhcp ipv4=172.16.20.10:255.255.255.0:172.16.20.254:168.95.1.1" &
}}}
<<toBalaNotes "3">>

///%1
//%/

///%2
//%/

///%3
//%/
''參考文章''
1. KVM/QEMU hypervisor driver
http://libvirt.org/drvqemu.html

{{item1{使用 Libvirt KVM/QEMU driver}}}
The Libvirt KVM/QEMU driver is a multi-instance driver, providing a single system wide privileged driver (the "system" instance), and per-user unprivileged drivers (the "session" instance). The URI driver protocol is "qemu". 連接 Libvirt KVM/QEMU 驅動程式, 有二種方式, 如下 :
{{{
qemu:///session                      (local access to per-user instance)
qemu:///system                       (local access to system instance)
}}}
As explained above there are two ways to access the QEMU driver in libvirt. @@color:red;The """qemu:///session""" family of URIs connect to a libvirtd instance running as the same user/group ID as the client application.@@ Thus the QEMU instances spawned from this driver will share the same privileges as the client application. The intended use case for this driver is desktop virtualization, with virtual machines storing their disk images in the user's home directory and being managed from the local desktop login session.

@@color:red;The """qemu:///system""" family of URIs connect to a libvirtd instance running as the privileged system account 'root'.@@ Thus the QEMU instances spawned from this driver may have much higher privileges than the client application managing them. The intended use case for this driver is server virtualization, where the virtual machines may need to be connected to host resources (block, PCI, USB, network devices) whose access requires elevated privileges. 

{{item1{"""qemu:///session""" (多人模式)}}}
此運作模式, 需利用 virt-install 命令產生虛擬主機

''1. 啟動 vyatta 虛擬電腦''
{{{
$ virsh --connect qemu:///session start vyatta
區域 vyatta 已開啟
}}}

''[重要]'' 上面命令並不會改變 家目錄中 vyatta.qcow2 硬碟檔權限, 這樣的運作方式, 允許多人可同時啟動各自的虛擬主機 @@color:red;(實測過)@@

''2. 進入 virsh 虛擬化的互動模式終端機''
{{{
$ virsh --connect qemu:///session 
歡迎使用 virsh - 虛擬化的互動模式終端機。

類型:  「help」以取得指令的求助畫面
        「quit」離開

virsh # list
 Id 名稱               狀態
----------------------------------
  1 vyatta               執行中

virsh # quit
}}}

''3. 登入 vyatta 虛擬電腦''
{{{
$ virsh --connect qemu:///session console vyatta
Connected to domain vyatta
Escape character is ^]

Welcome to Vyatta - vyatta ttyS0

vyatta login: vyatta
Password: 
Linux vyatta 3.3.8-1-586-vyatta-virt #1 SMP Wed Mar 13 10:54:37 PDT 2013 i686
Welcome to Vyatta.
This system is open-source software. The exact distribution terms for 
each module comprising the full system are described in the individual 
files in /usr/share/doc/*/copyright.
vyatta@vyatta:~$ 
}}}

''[重要]'' 按 Ctrl + ] 複合鍵, 離開 vyatta 虛擬電腦

''4. 關閉 vyatta 虛擬電腦''
{{{
$ virsh --connect qemu:///session shutdown vyatta
區域 vyatta 正在執行關機

$ virsh --connect qemu:///session list
 Id 名稱               狀態
----------------------------------
}}}
<<toBalaNotes "1">>
{{item1{"""qemu:///system""" (root 模式)}}}
此運作模式可由自訂的定義檔 (XML) 及 硬碟檔產生虛擬電腦

''1.建立 vyatta 虛擬電腦''
{{{
$ virsh define .libvirt/qemu/vyatta.xml 
區域 vyatta 定義自 .libvirt/qemu/vyatta.xml
}}}

上述命令執行成功, 會在 Libvirt 管理平台的設定目錄, 產生 vyatta.xml 檔案, 擁有人是 root
{{{
$ tree -up /etc/libvirt/qemu
/etc/libvirt/qemu
├── [drwxr-xr-x root    ]  networks
│ ├── [drwxr-xr-x root    ]  autostart
│ │ └── [lrwxrwxrwx root    ]  default.xml -> /etc/libvirt/qemu/networks/default.xml
│ └── [-rw-r--r-- root    ]  default.xml
└── [-rw------- root    ]  vyatta.xml

2 directories, 3 files
}}}

''2. 列出目前虛擬電腦清單''
{{{
$ virsh list --all
 Id 名稱               狀態
----------------------------------
  - vyatta               關機
}}}

''3. 啟動虛擬電腦''
{{{
$ virsh start vyatta
區域 vyatta 已開啟
}}}

''4. 連接並登入 vyatta 虛擬電腦''
{{{
$ virsh console vyatta
}}}

''5. 請按 Ctrl +] 脫離登入的 vyatta 虛擬電腦''

''6. 關閉虛擬電腦''
{{{
$ virsh shutdown vyatta
}}}

''[注意]'' 
在 Ubuntu Server 10.04 版中, 必須安裝 acpid 套件, 這樣 Ubuntu Server 才會處理, virsh shutdown 送出的關機命令, 以下命令是在''Ubuntu Server 虛擬主機'' 中操作 :
{{{
$ sudo apt-get install acpid
}}}

''強制關閉虛擬電腦''
{{{
$ virsh destroy vyatta
區域 vyatta 已經刪除
}}}

''7. 移除虛擬電腦''
{{{
$ virsh undefine vyatta
}}}

{{item1{虛擬硬碟檔 (vyatta.qcow2) 的檔案權限竟然被更改}}}
@@color:red;只要使用 virsh 命令, 啟動過的虛擬電腦的硬碟檔, 其權限一律更改為 root:root, 請看以下操作資訊 :@@
{{{
$ ll vyatta*.qcow2
-rw-r--r-- 1 student student 688914432 11月 30 14:01 vyatta01.qcow2
-rw-r--r-- 1 root    root    689176576 11月 30 14:27 vyatta.qcow2
}}}

此時執行 virsh start vyatta 命令, 還是可以把 vyatta 虛擬電腦啟動, 因 virsh 命令啟動虛擬電腦, 會自動更改虛擬硬碟檔的權限為 libvirt-qemu:kvm, 請看以下操作資訊
{{{
$ virsh start vyatta
區域 vyatta 已開啟

$ ll vyatta.qcow2
-rw-r--r-- 1 libvirt-qemu kvm 689176576 11月 30 14:27 vyatta.qcow2
}}}

還記得之前已將目前登入的帳號 (student), 加入 kvm 群組, 請看以下操作資訊
{{{
$ cat /etc/group | grep kvm
kvm:x:126:student
}}}

{{item1{檢視 Libvirtd 執行帳號}}}
檢視 /etc/libvirt/qemu.conf 這個設定檔

[img[img/kvm/qemuconf.png]]

''[註]'' Libvirtd 內定運作帳號及群組為 root/root

<<toBalaNotes "2">>
{{item1{使用 SSH 連接遠端 KVM 操作主機}}}
{{{
$ virsh -c qemu+ssh://root@140.137.214.253/system
root@140.137.214.253's password :
virsh # 
}}}

''A CDROM device can be mounted in a virtual machine by entering:''
{{{
$ virsh -c qemu:///system attach-disk US104NAT /dev/cdrom /media/cdrom
}}}

///%1
//%/

///%2
//%/
本文網址 : http://en.wikipedia.org/wiki/Loop_device

In Unix-like operating systems, a loop device, vnd (vnode disk), or lofi (loopback file interface) is a pseudo-device that makes a file accessible as a block device.

Before use, a loop device must be connected to an existing file in the filesystem. The association provides the user with an API that allows the file to be used in place of a block special file (cf. device file system). Thus, if the file contains an entire file system, the file may then be mounted as if it were a disk device.

Files of this kind are often used for ''CD ISO images'' and ''floppy disc images''. Mounting a file containing a filesystem via such a loop mount makes the files within that filesystem accessible. They appear in the mount point directory.

A loop device may allow some kind of data elaboration during this redirection. For example, the device may be the unencrypted version of an encrypted file. In such a case, the file associated with a loop device may be another pseudo-device. This is mostly useful when this device contains an encrypted file system. If supported, the loop device is in this case the decrypted version of the original encrypted file and can therefore be mounted as if it were a normal filesystem.

<<toBalaNotes "loopdev">>
{{item1{Loop device 實作 : @@color:blue;Loop 裝置無法處理 '隨需擴增' 的硬碟檔@@}}}
本文網址 : http://www.ibm.com/developerworks/linux/library/l-linux-filesystem/
Associating a file system to a storage device in Linux is a process called mounting. The mount command is used to attach a file system to the current file system hierarchy (root). During a mount, you provide a file system type, a file system, and a mount point.

To illustrate the capabilities of the Linux file system layer (and the use of mount), create a file system in a file within the current file system. This is accomplished first by creating a file of a given size using dd (copy a file using /dev/zero as the source) -- in other words, a file initialized with zeros, as shown in Listing 1.

''Listing 1. Creating an initialized file''
{{{
$ dd if=/dev/zero of=file.img bs=1k count=10000
10000+0 records in
10000+0 records out
10240000 bytes (10 MB) copied, 0.0074995 s, 137 MB/s

# 使用 file 命令檢視 file.img 的格式, 
$ file -s file.img
file.img: data

# file 命令 參數說明
-s, --special-files        treat special (block/char devices) files as ordinary ones
}}}
You now have a file called file.img that's 10MB. 

''[註]'' /dev/zero 是一個特殊的設備,從這個設備中,讀取的內容,全部都是 0 。

Use the losetup command to associate a loop device with the file (making it look like a block device instead of just a regular file within the file system):
{{{
# 檢視系統預載 loop 裝置
$ ll /dev/loop[0-9]*
brw-rw---- 1 root disk  7,   0  7月 11 12:39 /dev/loop0
brw-rw---- 1 root disk  7,   1  7月 11 12:39 /dev/loop1
brw-rw---- 1 root disk  7,   2  7月 11 12:39 /dev/loop2
brw-rw---- 1 root disk  7,   3  7月 11 12:39 /dev/loop3
brw-rw---- 1 root disk  7,   4  7月 11 12:39 /dev/loop4
brw-rw---- 1 root disk  7,   5  7月 11 12:39 /dev/loop5
brw-rw---- 1 root disk  7,   6  7月 11 12:39 /dev/loop6
brw-rw---- 1 root disk  7,   7  7月 11 12:39 /dev/loop7

# 將 disk.img 掛載在 /dev/loop0 裝置
$ sudo losetup /dev/loop0 file.img

# 檢視 /dev/loop0 掛載資訊
$ sudo losetup /dev/loop0 
/dev/loop0: [0811]:8917357 (/home/student/KVM/iKVM/file.img)
}}}

With the file now appearing as a block device (represented by /dev/loop0), create a file system on the device with mkfs.ext4. This command creates a new ext4 file system of the defined size, as shown in Listing 2.

''Listing 2. Creating an ext4 file system with the loop device @@color:blue;(直接格式化此資料檔, 不需規劃分割區)@@''
{{{
$ sudo mkfs.ext4 /dev/loop0
mke2fs 1.42 (29-Nov-2011)
Discarding device blocks: done                            
Filesystem label=
OS type: Linux
Block size=1024 (log=0)
Fragment size=1024 (log=0)
Stride=0 blocks, Stripe width=0 blocks
2512 inodes, 10000 blocks
500 blocks (5.00%) reserved for the super user
First data block=1
Maximum filesystem blocks=10485760
2 block groups
8192 blocks per group, 8192 fragments per group
1256 inodes per group
Superblock backups stored on blocks: 
	8193

Allocating group tables: done                            
Writing inode tables: done                            
Creating journal (1024 blocks): done
Writing superblocks and filesystem accounting information: done

# 使用 file -s 檢視 file.img 的格式,
$ file -s file.img
file.img: Linux rev 1.0 ext4 filesystem data, UUID=5b5c9687-6069-4207-b61e-3daf119b1c6d (extents) (huge files)

}}}
The file.img file, represented by the loop device (/dev/loop0), is now mounted to the mount point /tmp/loop0 using the mount command. Note the specification of the file system as ext4. When mounted, you can treat this mount point as a new file system by using an ls command, as shown in Listing 3.

''Listing 3. Creating a mount point and mounting the file system through the loop device''
{{{
$ mkdir /tmp/loop0

$ sudo mount -t ext4 /dev/loop0 /tmp/loop0

$ ls -al /tmp/loop0/
總計 17
drwxr-xr-x  3 root root  1024  7月 11 13:40 .
drwxrwxrwt 12 root root  4096  7月 11 13:23 ..
drwx------  2 root root 12288  7月 11 13:20 lost+found
}}}

As shown in Listing 4, you can continue this process by creating a new file within the new mounted file system, associating it with a loop device, and creating another file system on it.

''Listing 4. Creating a new loop file system within a loop file system''
{{{
$ dd if=/dev/zero of=/mnt/point1/file.img bs=1k count=1000
1000+0 records in
1000+0 records out
$ losetup /dev/loop1 /mnt/point1/file.img
$ mke2fs -c /dev/loop1 1000
mke2fs 1.35 (28-Feb-2004)
max_blocks 1024000, rsv_groups = 125, rsv_gdb = 3
Filesystem label=
...
$ mkdir /mnt/point2
$ mount -t ext2 /dev/loop1 /mnt/point2
$ ls /mnt/point2
lost+found
$ ls /mnt/point1
file.img lost+found

}}}

''Listing 5. Unmounting the mount point and detaching the loop device''
{{{
$ sudo umount /tmp/loop0/

$ sudo losetup -d /dev/loop0
}}}

From this simple demonstration, it's easy to see how powerful the Linux file system (and the loop device) can be. You can use this same approach to create encrypted file systems with the loop device on a file. This is useful to protect your data by transiently mounting your file using the loop device when needed.

{{item1{Loop 裝置另一種實作方式}}}
{{{

# 以下命令會自動將 file.img 掛載到 /dev/loop0, 然後再將 /dev/loop0 掛載到 /tmp/loop0
$ sudo mount -o loop file.img /tmp/loop0

$ ll /tmp/loop0/
總計 17
drwxr-xr-x  3 root root  1024  7月 11 13:22 ./
drwxrwxrwt 12 root root  4096  7月 11 13:23 ../
-rw-r--r--  1 root root     0  7月 11 13:22 hello
drwx------  2 root root 12288  7月 11 13:20 lost+found/

# 以下命令證明 file.img 有掛載到 /dev/loop0
$ sudo losetup /dev/loop0
/dev/loop0: [0811]:8917357 (/home/student/KVM/iKVM/file.img)

# 以下命令會自動將 file.img 從 /dev/loop0 卸掛載
$ sudo umount /tmp/loop0

$ sudo losetup /dev/loop0
loop:無法取得裝置上的資訊 /dev/loop0: 沒有此一裝置或位址
}}}

Linux and other Unix-like hosts can mount images created with the ''raw format (重點)'' type using a loopback device. From a root login (or using sudo), mount a loopback with an offset of 32,256.
{{{
# mount -o loop,offset=32256 /path/to/image.img /mnt/mountpoint
}}}

''Note:'' if you have an image without partitions you should omit the ,offset=32256 part. 
<<toBalaNotes "vfsdo">>
{{item1{建立與使用 RamDisk}}}
建立 ramdisk 空間的方式,是用 dd 指令,填入指定大小的內容後, 就會在 ram 中保留該大小的空間;而這個 ram 空間,就連接到 dd 指令中,所指定的設備名稱,如 /dev/ram0 。
{{{
$ dd if=/dev/zero of=/dev/ram0 bs=1k count=4096
}}}
上面這指令的意義是,從輸入設備 /dev/zero 中,以 bs 指定的大小(1k), 讀取資料,輸出到設備 /dev/ram0 中,重覆輸出入 count 指定的次數(4096次)。這表示 /dev/ram0 所連接的 ram 空間,會有 4096K 的空間可用。

/dev/zero 是一個特殊的設備,從這個設備中,讀取的內容,全部都是 0 。

ramdisk 可以建立的很大,但我們只是要找一個空間,供我們製作 root disk ,因為一塊軟碟片的空間不會很大,所以我們也不需要 挖一塊很大的 ram 空間,大概 4096k 就夠了。

雖然一塊 MF-2HD 的磁片,只有 1440K ,但因為我們是要將 root filesystem 的內容,變成 image 檔,再壓縮過一次後,才放入磁片裡, 所以這塊要做為 root filesystem 的空間,可以大於 1440K 。

格式化這塊 ram 空間,因為 minix 檔案系統格式比較省空間,所以 用這格式:
{{{
$ mkfs.minix /dev/ram0 4096
}}}
最後 mount 此空間 (我 mount 在 /mnt/rootfs 目錄上),準備複製檔案:
{{{
$ mount -t minix /dev/ram0 /mnt/rootfs 
}}}


///%loopdev
//%/

///%vfsdo
//%/
''參考文章''
1. Mobile web application framework match-up, Part 3: Boost your next mobile web app with jQTouch
http://www.ibm.com/developerworks/library/wa-jqtouch/
2. Create an iPhone Optimised Website using jQTouch
http://www.tuttoaster.com/create-an-iphone-optimised-website-using-jqtouch/5/
3. Jquery Android Apps (很重要)
http://www.androidzoom.com/free_android_applications/Jquery
4. Building Mobile Web Apps the Right Way: Tips and Techniques
http://sixrevisions.com/web-applications/building-mobile-web-apps-the-right-way-tips-and-techniques/
5. Learn Sencha Touch (重要)
http://www.sencha.com/learn/touch/
6. Introduction to jQuery Mobile
https://www.ibm.com/developerworks/library/wa-jquerymobileupdate/index.html

{{item1{Mobile Web App Framework}}}
近年來 SmartPhone 大行其道,iPhone 和 Android 的能見度越來越高,這些裝置內建的瀏覽器功能進展快速,對於 HTML5 和 CSS3 的支援程度有時候更勝於電腦上的瀏覽器。就因為如此,對於熟悉網站開發技術的開發者而言,這是一個跨入 Mobile Web App 的絕佳機會。因為我們不一定需要去學習 Objective-C 或 Java 這些不熟悉的語言才能開始撰寫行動裝置上的應用程式。就如同在開發一般網路應用程式一樣,選擇一套好用的 framework,絕對是事半功倍。很幸運的是因為時勢所趨,目前 Mobile Web App Framework 選擇性其實不少,不過到底有哪些?要怎麼選?就變成一個困難的課題了。

這些不同的 Mobile Web App Framework,我認為在功能上應該可分為三大部份。

1. ''UI Component'':提供適用於觸控螢幕的UI元件 
2. ''Javascript Library'':讓開發更有效率的Javascipt函式庫。 
3. ''Wrapper'':把 Mobile Web App 包裝成原生軟體 (Native App),同時提供一些 function 讓 javascript 可以存取一些底層的功能 (例如:相機、檔案系統、各類 sensor)。 

如果只是要開發 Mobile Web App,就不需要 Wrapper。如果想包裝成 Native App 上 App Store 或 Android Market 去賣,就得用上 Wrapper。而 UI Component 和 Javascript Library 則是可以依照自己的喜好選擇用或不用。

下面的表格就是依照上面三大部份和支援的裝置整理出來的清單,轉跳後有各 framework 的簡要說明。

[img[img/MobileWebApp.jpg]]

{{item1{jquery mobile}}}
官方網站 : http://jquerymobile.com/

Dreamweaver 5.5 內建了jQuery Mobile 開發框架,Paul Burnett 表示,開發人員透過滑鼠點選,可以快速增加手機應用常見的介面元素,例如按鈕式列表和選單、調整滑桿,換頁效果等,也能讓網頁使用觸控螢幕的操縱元素。開發人員只要修改元件的內容和對應程式碼後,就可以輸出打包成不同平臺上的應用程式。jQuery Mobile 會自動支援跨手機平臺的程式碼相容。Dreamweaver 也強化了不同螢幕尺寸的預覽機制,以便開發者比較在不同裝置上的呈現效果。不論網頁程式或Flash程式都可以打包成不同平臺上的 App,其中包括Android、RIM、iOS等

{{item1{認識 Sencha Touch}}}
官方網址 : http://www.sencha.com/products/touch/

Sencha Touch 2, a high-performance HTML5 mobile application framework, is the cornerstone of the Sencha HTML5 platform. Built for enabling world-class user experiences, Sencha Touch 2 is the only framework that enables developers to build fast and impressive apps that work on iOS, Android, BlackBerry, Kindle Fire, and more.

Find out why more and more companies are investing in their future by developing with HTML5. View Apps and Customers

{{item1{認識 jQTouch}}}
官方網址 : http://www.jqtouch.com/

jQTouch makes programming for mobile browsers fun! Simple HTML, CSS, and jQuery Javascript combine to make it easy to build applications for WebKit-based mobile browsers like the iPhone/iPod Touch, Android, and Palm webOS.

{{op1{jQTouch 實作-靜態資訊}}}

''index.html''
{{{
<!doctype html>
<html>
<head>
    <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
    <title>Intranet Employee Directory</title>
    <style type="text/css" media="screen">
        @import "jqtouch/jqtouch.min.css";
    </style>
    <style type="text/css" media="screen">
        @import "themes/jqt/theme.min.css";
    </style>
    <script src="jqtouch/jquery.1.3.2.min.js" type="text/javascript" charset="utf-8"></script>
    <script src="jqtouch/jqtouch.min.js" type="text/javascript" charset="utf-8"></script>
    <script type="text/javascript">
        var jQT = $.jQTouch({
          icon : 'icon.png'
      });
    </script>
</head>
<body>
    <div class="home">
        <div class="toolbar">
            <h1>Employees</h1>
        </div>
        <ul class="edgetoedge">
            <li class="arrow"><a href="#list-style">List</a></li>
            <li class="arrow"><a href="#table-style">Table</a></li>
        </ul>
    </div>
</body>
</html>
}}}

''網頁畫面''

[img[img/jQTouch/jqtouch01.jpg]]

<<toBalaNotes "1">>

{{op1{jQTouch 實作-動態資訊}}}

{{{
<!doctype html>
<html>
<head>
	<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
	<title>Intranet Employee Directory</title>
        <style type="text/css" media="screen">@import "jqtouch/jqtouch.min.css";</style>
        <style type="text/css" media="screen">@import "themes/jqt/theme.min.css";</style>
	<script src="jqtouch/jquery.1.3.2.min.js" type="text/javascript" charset="utf-8"></script>
        <script src="jqtouch/jqtouch.min.js" type="text/javascript" charset="utf-8"></script>
	<script type="text/javascript">
		var jQT = $.jQTouch({
			icon : 'icon.png',
			startupScreen: 'startup.png'
		});
	$(document).ready(function(){
		$.getJSON('employees.json', function(data){
			data.forEach(addEmployee);
		});
		// Ajax call can be found in Listing 2
		$("#addEmp").submit(function(){
			var e = {
				firstName : $("#fn")[0].value,
				lastName : $("#ln")[0].value,
				email : $("#email")[0].value,
				phone : $("#phone")[0].value
			};
			addEmployee(e);
			jQT.goBack();
			return false;
		});
	});
		function addEmployee(e){
			addEmployeeToList(e);
			addEmployeeToTable(e);
		}
		function addEmployeeToList(e){
			var list = $("#eList");
			var text = e.firstName + " " + e.lastName +
				 ", " + e.phone + ", " + e.email;
			var li = $("<li>").html(text);
			list.append(li);
		}
		function addEmployeeToTable(e){
			var table = $("#eTable");
			var tr = $("<tr>")
					.append($("<td>").html(e.firstName + " " + e.lastName))
					.append($("<td>").html(e.phone))
					.append($("<td>").html(e.email));
			table.append(tr);
		}
	</script>
</head>
<body>
	<div class="home">
		<div class="toolbar">
			<h1>Employees</h1>
		</div>
		<ul class="edgetoedge">
			<li class="arrow"><a href="#list-style">List</a></li>
			<li class="arrow"><a href="#table-style">Table</a></li>
		</ul>
	</div>
	<div id="list-style">
		<div class="toolbar">
			<h1>List</h1>
			<a class="button back" href="#">Back</a>
		</div>
		<ul class="edgetoedge" id="eList"></ul>
	</div>
	<div id="table-style">
		<div class="toolbar">
			<h1>Table</h1>
			<a class="button back" href="#">Back</a>
			<a class="button flip" href="#new">+</a>
		</div>		
		<table>
			<thead>
				<tr>
					<td>Name</td>
					<td>Phone</td>
					<td>Email</td>
				</tr>
			</thead>
			<tbody id="eTable"></tbody>
		</table>
	</div>
	<div id="new">
		<div class="toolbar">
			<h1>Add Employee</h1>
			<a class="button cancel" href="#">Cancel</a>
		</div>
		<form id="addEmp" method="post">
			<ul>
				<li><input type="text" placeholder="First Name" id="fn" 
					name="firstName" />
				</li>
				<li><input type="text" placeholder="Last Name" id="ln" 
					name="lastName" />
				</li>
				<li><input type="email" placeholder="Email" 
					autocapitalize="off" id="email" name="email" />
				</li>
				<li><input type="tel" placeholder="Phone" id="phone" 
					name="phone" />
				</li>								
			</ul>
			<input type="submit" class="submit" value="Submit"/>
		</form>
	</div>
</body>
</html>
}}}
<<toBalaNotes "2">>

///%1
//%/

///%2
//%/


''參考文章''
1. Autologin as root or as any other user in Linux in console mode
http://blogs.koolwal.net/2009/04/30/autologin-linux-console-mode/
2. The TTY demystified
http://www.linusakesson.net/programming/tty/index.php

{{item1{虛擬電腦 - 終端機自動登入}}}
本文網址 : http://tombuntu.com/index.php/2010/01/01/enable-automatic-login-in-ubuntu-9-10-server/

Ubuntu makes it easy to enable automatic login if you’re using a login manager such as GDM. I was recently setting up a minimal Ubuntu 9.10 system with an LXDE desktop and no GDM. Here’s how I enabled automatic login.

''[註]''  This was tested on Ubuntu 9.10. Previous versions of Ubuntu require different procedures because of changes to the way Ubuntu boots.

Open /etc/init/tty1.conf as root:
{{{
$ sudo nano /etc/init/tty1.conf
}}}
Change the last line of this file to (where USERNAME is the username of the user you want to log in):
{{{
exec /bin/login -f USERNAME < /dev/tty1 > /dev/tty1 2>&1
}}}
Reboot, and the user you chose should be logged in automatically after boot. If something goes wrong, you can switch to a different TTY with CTRL+ALT+F2 and log in normally.

If you want this user to be logged into a graphical environment instead of just a shell, there’s more work to be done. Open your user’s .bashrc file:
{{{
nano ~/.bashrc

Add the following to the end of the file:
if [ $(tty) == "/dev/tty1" ]; then
startx
fi
}}}
This code will start X (the graphical environment) whenever the user logs in on TTY1. You can add more code after startx that will be executed if the user logs out of X.

<<toBalaNotes "autologin">>
{{item1{虛擬電腦 - 建立 Serial Console}}}
It is possible to access a KVM Guest directly using the ''Serial Console'' interface, in which case setting up bridged networking, SHH, and similar is not necessary.

This is done using the ''virsh'' utility which a shell interface for ''libvirt''.

''Configure a Serial Console in the Guest''
First, we need to configure a serial console in the guest, in order that it will accept a connection:

This is the configuration method for (guest) Ubuntu 9.10 (Karmic) and later versions, if you are using an earlier version of Ubuntu see [[SerialConsoleHowto|https://help.ubuntu.com/community/SerialConsoleHowto]] for instructions.
{{{
$ sudo nano /etc/init/ttyS0.conf                          # 在 10.04 版, 這個檔案不存在,所以自行產生
}}}

Add the configuration:
{{{
# ttyS0 - getty
#
# This service maintains a getty on ttyS0 from the point the system is
# started until it is shut down again.

start on stopped rc RUNLEVEL=[2345]
stop on runlevel [!2345]

respawn
exec /sbin/getty -L 115200 ttyS0 xterm
}}}

Run the following command to initiate the Serial Console or ''restart the VM'':
{{{
$ sudo start ttyS0

or 

$ sudo reboot
}}}

''直接連接虛擬電腦串列控制台''
{{{
$ virsh console US104_NoBody
Connected to domain US104_NoBody
Escape character is ^]                               
                                                               
Ubuntu 10.04.1 LTS US104NoBody ttyS0                  # 記得要先按 Enter, 進入登入畫面

US104NoBody login: 
}}}
Press ''Return'' to get the login prompt, and log in as usual (note that username input works even if the login prompt is not shown).

''[註]'' 按 "''Ctrl + ]''" 離開登入的虛擬主機. 如虛擬主機重新開機, virsh console 不需要重新執行

{{item1{虛擬電腦 - Serial Console 自動登入}}}

在虛擬主機, 執行以下步驟 :

''1. 安裝 mingetty 套件''
{{{
$ sudo apt-get install mingetty
}}}

''2. 修改 /etc/init/ttyS0.conf''
{{{
# ttyS0 - getty
#
# This service maintains a getty on ttyS0 from the point the system is
# started until it is shut down again.

start on stopped rc RUNLEVEL=[2345]
stop on runlevel [!2345]

respawn
exec /sbin/mingetty --autologin root --noclear ttyS0
}}}

{{item1{改變語系}}}
使用 Serial Console 方式登入, 內定語系是英文

''$ nano .bashrc''
{{{
                        :
                        :
alias ping='ping -c 4 '
alias bye='sudo shutdown -h now'
alias resetmac='sudo rm /etc/udev/rules.d/70-persistent-net.rules'
alias netstat='netstat -an | head -n 18'

export LANG=zh_TW.UTF-8        (修改為 UTF-8 繁中編碼)
export LANGUAGE=zh_TW:zh    (修改為繁中編碼)
}}}
<<toBalaNotes "sconsole">>

{{item1{虛擬電腦 - 接受 virsh shutdown 命令}}}
在 Ubuntu Server 安裝 acpid 套件
{{{
$ sudo apt-get install acpid
}}}

''acpid 說明''
acpid is designed to notify user-space programs of ACPI events. acpid should be started during the system boot, and will run as a background process, by default. It will open an events file (/proc/acpi/event by default) and attempt to read whole lines. When a line is received (an event), acpid will examine a list of rules, and execute the rules that match the event. 
{{{
$ ls -al /etc/acpi/events/
total 12
drwxr-xr-x 2 root root 4096 2010-09-17 14:17 .
drwxr-xr-x 3 root root 4096 2010-09-17 14:17 ..
-rw-r--r-- 1 root root  423 2010-04-29 13:31 powerbtn

$ cat /etc/acpi/events/powerbtn 
# /etc/acpi/events/powerbtn
# This is called when the user presses the power button and calls
# /etc/acpi/powerbtn.sh for further processing.

# Optionally you can specify the placeholder %e. It will pass
# through the whole kernel event message to the program you've
# specified.

# We need to react on "button power.*" and "button/power.*" because
# of kernel changes.

event=button[ /]power
action=/etc/acpi/powerbtn.sh
}}}
<<toBalaNotes "acpid">>

///%autologin
//%/

///%sconsole
//%/

///%acpid
//%/
{{{
# set service dhcp-server shared-network-name POOL100 subnet 192.168.100.0/24 start 192.168.100.128 stop 192.168.1.240
[edit]
root@GWVT100# set service dhcp-server shared-network-name POOL88 subnet 192.168.88.0/24 start 192.168.88.128 stop 192.168.88.240
[edit]
root@GWVT100# set service dhcp-server shared-network-name POOL100 subnet 192.168.100.0/24 start 192.168.100.128 stop 192.168.100.240
[edit]
root@GWVT100# set service dhcp-server shared-network-name POOL660 subnet 192.168.66.0/25 start 192.168.66.64 stop 192.168.66.120
[edit]

# set service dhcp-server shared-network-name POOL100 subnet 192.168.100.0/24 dns-server 168.95.1.1 
[edit]
root@GWVT100# set service dhcp-server shared-network-name POOL88 subnet 192.168.88.0/24 dns-server 168.95.1.1
[edit]
root@GWVT100# set service dhcp-server shared-network-name POOL660 subnet 192.168.66.0/25 dns-server 168.95.1.1
[edit]
root@GWVT100# set service dhcp-server shared-network-name POOL100 subnet 192.168.100.0/24 default-router 192.168.100.254
[edit]
root@GWVT100# set service dhcp-server shared-network-name POOL88 subnet 192.168.88.0/24 default-router 192.168.88.254
[edit]
root@GWVT100# set service dhcp-server shared-network-name POOL660 subnet 192.168.66.0/25 default-router 192.168.66.126
[edit]

}}}

The following macro call exports all tiddlers to a text file "c:/~MyTiddlyWikiExport.txt", using a customized format.
{{{
<<forEachTiddler
 script 'function getSortedTagsText(tiddler) {var tags = tiddler.tags; if (!tags) return ""; tags.sort(); var result = ""; for (var i = 0; i < tags.length;i++) {result += tags[i]+ " ";} return result;} function writeTiddler(tiddler) {return "==== "+tiddler.title+"=========================\nTags: "+ getSortedTagsText(tiddler)+"\nModified: "+tiddler.modified.convertToYYYYMMDDHHMM()+"\nModifier: "+tiddler.modifier+"\n--------------------------------------------------\n"+tiddler.text+"\n--------------------------------------------------\n(End of "+tiddler.title+")\n\n\n\n"}'
 write 
 'writeTiddler(tiddler)' 
 toFile 'file:///c:/MyTiddlyWikiExport.txt' withLineSeparator '\r\n'
>>
}}}
For better readability, here is the script text in a nicer layout:
{{{
function getSortedTagsText(tiddler) {
 var tags = tiddler.tags; 
 if (!tags) 
 return ""; 
 tags.sort(); 
 var result = ""; 
 for (var i = 0; i < tags.length;i++) {
 result += tags[i]+ " ";
 } 
 return result;
} 

function writeTiddler(tiddler) {
 return "==== "+tiddler.title+"=========================\n"+
 "Tags: "+ getSortedTagsText(tiddler)+"\n"+
 "Modified: "+tiddler.modified.convertToYYYYMMDDHHMM()+"\n"+
 "Modifier: "+tiddler.modifier+"\n"+
 "--------------------------------------------------\n"+
 tiddler.text+"\n"+
 "--------------------------------------------------\n"+
 "(End of "+tiddler.title+")\n\n\n\n"
}
}}}

<<forEachTiddler
 script 'function getSortedTagsText(tiddler) {var tags = tiddler.tags; if (!tags) return ""; tags.sort(); var result = ""; for (var i = 0; i < tags.length;i++) {result += tags[i]+ " ";} return result;} function writeTiddler(tiddler) {return "==== "+tiddler.title+"=========================\nTags: "+ getSortedTagsText(tiddler)+"\nModified: "+tiddler.modified.convertToYYYYMMDDHHMM()+"\nModifier: "+tiddler.modifier+"\n--------------------------------------------------\n"+tiddler.text+"\n--------------------------------------------------\n(End of "+tiddler.title+")\n\n\n\n"}'
 write 
 'writeTiddler(tiddler)' 
 toFile 'file:///c:/MyTiddlyWikiExport.txt' withLineSeparator '\r\n'
>>
{{item1{連接行動網站資料夾}}}

[img[img/mwclass/ws200301.png]]

[img[img/mwclass/ws200302.png]]

[img[img/mwclass/ws200303.png]]

<<toBalaNotes "1">>

{{item1{製作第一個網頁}}}

[img[img/mwclass/ws200304.png]]

<<toBalaNotes "2">>

{{item1{瀏覽第一個網頁}}}
[img[img/mwclass/ws200305.png]]

<<toBalaNotes "3">>


///%1
//%/

///%2
//%/

///%3
//%/
Tiny Server Core (TSC) 此微型 Linux 系統, 是由 Tiny Core Linux 發行套件重製而來. 它可由網路工程師, 直接透過 Linux 核心參數, 設定它的 IP, Default Gateway 及 DNS Server 以及決定是否啟動 NAT 功能, 如有雙網卡還會自動啟用靜態路由功能

在 Linux KVM 裸機系統中 (安裝 qemu-kvm 套件), 使用 Tiny Server Core 的操作步驟如下

''1. 下載 Tiny Server Core 系統檔''
{{{
$ cd ~
$ wget http://tobala.net/download/kvmtsc.zip
--2013-01-03 11:15:23--  http://tobala.net/download/kvmtsc.zip
正在查找主機 tobala.net (tobala.net)... 69.89.27.215
正在連接 tobala.net (tobala.net)|69.89.27.215|:80... 連上了。
已送出 HTTP 要求,正在等候回應... 200 OK
長度: 16607224 (16M) [application/zip]
Saving to: `kvmtsc.zip'

100%[======================================>] 16,607,224  87.4K/s   in 2m 36s

2013-01-03 11:18:03 (104 KB/s) - `kvmtsc.zip' saved [16607224/16607224]
}}}

''2. 解壓縮 kvmtsc.zip 至目前使用者的家目錄''
{{{
$ unzip kvmtsc.zip
Archive:  kvmtsc.zip
  inflating: kvmtsc/tsc32.gz       
  inflating: kvmtsc/tsc64.gz       
  inflating: kvmtsc/vmlinuz32      
  inflating: kvmtsc/vmlinuz64
}}}

''3. Tiny Server Core目錄 (kvmtsc)''
{{{
切換到 kvmtsc 目錄

$ cd kvmtsc   

在 kvmtsc 目錄中存放以下四個檔案

$ ls -alh
總計 16M
drwxrwxr-x  2 student student 4.0K  1月  3 11:18 .
drwxr-xr-x 35 student student 4.0K  1月  3 11:18 ..
-rw-rw-r--  1 student student 5.4M 11月 13 10:48 tsc32.gz
-rw-rw-r--  1 student student 5.5M 11月 13 10:48 tsc64.gz
-rw-rw-r--  1 student student 2.4M  7月  7 21:19 vmlinuz32
-rw-rw-r--  1 student student 2.7M  7月  7 21:43 vmlinuz64

32 位元系統 = vmlinuz32+tsc32.gz
64 位元系統 = vmlinuz64+tsc64.gz
}}}

''4. 啟動 Tiny Server Core 系統''
透過核心參數直接設定 IP, Default Gateway 及 DNS, 命令如下 :
{{{
$ kvm -m 128 -kernel vmlinuz32 -initrd tsc32.gz -append 'nodhcp ipv4=172.16.100.19:255.255.255.0:172.16.100.254:168.95.1.1'
}}}
-append 這個參數的內容格式如下 :

"ipv4=IP:Subnet Mask:default Gateway:DNS IP"

在系統啟動畫面可以得知 TSC 系統的 IP 設為 172.16.100.19, default gateway 設為 172.16.100.254,  DNS Server 設為 168.95.1.1

<<toBalaNotes "1">>
{{item1{直接由 Linux Kernel 啟動 FloppyFW 系統}}}
{{{
$ cd VMFactory/kvm_virtsh/floppyfw

$ kvm -kernel vmlinuz -initrd initrd.sq -append 'rw init=/linuxrc root=/dev/ram0 source=/dev/floppy/0' -drive file=/var/lib/libvirt/images/ffw3.img,if=floppy,index=0,format=raw
}}}

''[註]'' 系統登入後, 可由 /proc/cmdline 這檔案得知 ''append 的參數'' 

///%1
//%/
<<forEachTiddler 
 where
   'tiddler.tags.contains("plugin")'
>>
[img[img/Lab/Lab101.png]]

{{item1{開始建置}}}

''1. 下載 Lab101.zip''
將 Lab101.zip 解壓縮至 iLab 目錄中

''2.建置 Lab101 網路系統架構''
在 Lab101 建置過程分成二個階段, 第一個階段產生四個虛擬網段 (HUB100, HUB660, HUB66128, HUB88), 第二個階段產生三部主機 (GW100, R660, R88) 及 二部桌機 (TC660, TC88), 執行命令如下 :
{{{
$ cd CloudLab
$ sudo ./LabManager.sh Lab101/
[ KVM 虛擬網路管理 ]
a) 建立  d) 刪除  q) 離開 > a

=> 開始建立虛擬網路

建立 HUB100 虛擬網路 (Hub) ? (y/n) y
啟動 HUB100 虛擬網路完成

建立 HUB660 虛擬網路 (Hub) ? (y/n) y
啟動 HUB660 虛擬網路完成

建立 HUB66128 虛擬網路 (Hub) ? (y/n) y
啟動 HUB66128 虛擬網路完成

建立 HUB88 虛擬網路 (Hub) ? (y/n) y
啟動 HUB88 虛擬網路完成

[ KVM 虛擬電腦管理 ]
a) 建立  d) 刪除  q) 離開 > a

=> 開始建立虛擬電腦

建立 GW100 虛擬電腦 ? (y/n) y
複製 GW100.vmdk 檔案 ... 成功
建立 GW100 虛擬電腦完成

建立 R660 虛擬電腦 ? (y/n) y
複製 R660.vmdk 檔案 ... 成功
建立 R660 虛擬電腦完成

建立 R88 虛擬電腦 ? (y/n) y
複製 R88.vmdk 檔案 ... 成功
建立 R88 虛擬電腦完成

建立 TC660 虛擬電腦 ? (y/n) y
複製 TC660.vmdk 檔案 ... 成功
建立 TC660 虛擬電腦完成

建立 TC88 虛擬電腦 ? (y/n) y
複製 TC88.vmdk 檔案 ... 成功
建立 TC88 虛擬電腦完成
}}}

<<toBalaNotes "1">>

{{item1{虛擬電腦網路測通}}}

''1. 啟動 Lab101 所有虛擬電腦''

[img[img/Lab/Lab101start.png]]

''2. 登入 GW100 主機''
{{{
$ sudo virsh console GW100
Connected to domain GW100
Escape character is ^]

Micro Core Linux 3.8.2
GW100 login: root

$ ping www.hinet.net
PING www.hinet.net (202.39.224.7): 56 data bytes
64 bytes from 202.39.224.7: seq=0 ttl=243 time=191.865 ms
64 bytes from 202.39.224.7: seq=1 ttl=243 time=71.684 ms
64 bytes from 202.39.224.7: seq=2 ttl=243 time=71.417 ms
64 bytes from 202.39.224.7: seq=3 ttl=243 time=80.469 ms

--- www.hinet.net ping statistics ---
4 packets transmitted, 4 packets received, 0% packet loss
round-trip min/avg/max = 71.417/103.858/191.865 ms

$ ping 192.168.88.11
PING 192.168.88.11 (192.168.88.11): 56 data bytes
64 bytes from 192.168.88.11: seq=0 ttl=63 time=2.513 ms
64 bytes from 192.168.88.11: seq=1 ttl=63 time=1.276 ms
                                :

$ ping 192.168.66.11
PING 192.168.66.11 (192.168.66.11): 56 data bytes
64 bytes from 192.168.66.11: seq=0 ttl=63 time=1.903 ms
64 bytes from 192.168.66.11: seq=1 ttl=63 time=1.234 ms
                                 :
}}}

{{item1{檢測所有網段}}}

''1. 編輯 ./conf/netid.conf 這個設定檔''
{{{
$ nano conf/netid.conf
192.168.100.0/24
192.168.66.0/25
192.168.88.0/24
}}}

''2. 開始檢測各網段''
使用 netcheck.sh 檢測各網段的所有虛擬電腦, 此程式內定會去讀 ''./conf/netid.conf'' 這個設定檔
{{{
$ sudo ./netcheck.sh 

<html>
<head>
<link rel=stylesheet type='text/css' href='netid.css'>
</head>
<body>

<div id='192.168.100.0'>192.168.100.0/24</div>
<ol>
<li>192.168.100.1 跳過 (本機位址)
<li>192.168.100.66 ssh 
<li>192.168.100.88 ssh 
<li>192.168.100.254 ssh 
</ol>

<div id='192.168.66.0'>192.168.66.0/25</div>
<ol>
<li>192.168.66.1 跳過 (本機位址)
<li>192.168.66.11 
<li>192.168.66.126 ssh 
</ol>

<div id='192.168.88.0'>192.168.88.0/24</div>
<ol>
<li>192.168.88.1 跳過 (本機位址)
<li>192.168.88.11 
<li>192.168.88.254 ssh 
</ol>

</body>
</html>

}}}

執行 checknid.sh 這程式, 可以直接指定 Network ID (140.137.214.0/24), 如下例 :
{{{
# sudo ./checknid.sh 140.137.214.0/24
}}}

<<toBalaNotes "2">>

{{item1{移除 Lab101}}}

''1. 關閉 Lab101 所有虛擬電腦''
{{{
$ sudo virsh destroy TC660
$ sudo virsh destroy TC88

$ sudo virsh shutdown GW100
$ sudo virsh shutdown R660
$ sudo virsh shutdown R88
}}}

''2. 開始移除''
{{{
[ KVM 虛擬網路管理 ]
a) 建立  d) 刪除  q) 離開 > d

=> 開始刪除虛擬網路

刪除 HUB100 虛擬網路 (Hub) ? (y/n) y
刪除 HUB100 虛擬網路完成

刪除 HUB660 虛擬網路 (Hub) ? (y/n) y
刪除 HUB660 虛擬網路完成

刪除 HUB66128 虛擬網路 (Hub) ? (y/n) y
刪除 HUB66128 虛擬網路完成

刪除 HUB88 虛擬網路 (Hub) ? (y/n) y
刪除 HUB88 虛擬網路完成

[ KVM 虛擬電腦管理 ]
a) 建立  d) 刪除  q) 離開 > d

=> 開始刪除虛擬電腦

刪除 GW100 虛擬電腦 ? (y/n) y
刪除 GW100 虛擬電腦成功
刪除 GW100 虛擬電腦硬碟檔 ? (y/n) y
刪除 GW100.vmdk 虛擬電腦硬碟檔成功

刪除 R660 虛擬電腦 ? (y/n) y
刪除 R660 虛擬電腦成功
刪除 R660 虛擬電腦硬碟檔 ? (y/n) y
刪除 R660.vmdk 虛擬電腦硬碟檔成功

刪除 R88 虛擬電腦 ? (y/n) y
刪除 R88 虛擬電腦成功
刪除 R88 虛擬電腦硬碟檔 ? (y/n) y
刪除 R88.vmdk 虛擬電腦硬碟檔成功

刪除 TC660 虛擬電腦 ? (y/n) y
刪除 TC660 虛擬電腦成功
刪除 TC660 虛擬電腦硬碟檔 ? (y/n) y
刪除 TC660.vmdk 虛擬電腦硬碟檔成功

刪除 TC88 虛擬電腦 ? (y/n) y
刪除 TC88 虛擬電腦成功
刪除 TC88 虛擬電腦硬碟檔 ? (y/n) y
刪除 TC88.vmdk 虛擬電腦硬碟檔成功
}}}

<<toBalaNotes "3">>

///%1
//%/

///%2
//%/

///%3
//%/
''參考文章''
1. Distributed data processing with Hadoop, Part 1: Getting started
http://www.ibm.com/developerworks/linux/library/l-hadoop-1/index.html
2. Distributed data processing with Hadoop, Part 2: Going further
http://www.ibm.com/developerworks/linux/library/l-hadoop-2/index.html
3. Running Hadoop On Ubuntu Linux (Single-Node Cluster)
http://www.michael-noll.com/wiki/Running_Hadoop_On_Ubuntu_Linux_%28Single-Node_Cluster%29
4. How to Set Up a Hadoop Cluster with Mongo Support on EC2
http://artemyankov.com/post/16717104998/how-to-set-up-a-hadoop-cluster-with-mongo-support-on
5. VMware 開放源碼專案讓 Apache Hadoop 也能執行於雲端環境 
http://www.ithome.com.tw/itadm/article.php?c=74375

{{item1{建置 Hadoop 主機 : HDP119}}}
Lab301 目錄中的 HDP119 目錄, 存放直接內核開機所需的檔案
{{{
$ cd ~/iLab/Lab301/HDP119

$ sudo virsh define HDP119.xml
區域 HDP119 定義自 HDP119.xml

$ sudo virsh start HDP119
區域 HDP119 已開啟
}}}

''[重點]'' HDP119 虛擬電腦要開機成功, 必須 /home/student/iLab, /home/student/iLab/Lab301 及 /home/student/iLab/Lab301/HDP119 這三個目錄, 權限要設為 755

{{item1{Linux 虛擬電腦可直接由內核開機, 方法有二種}}}

''方法一 : HDP119.xml''
{{{
<domain type='kvm'>
  <name>HDP119</name>
  <memory>262144</memory>
  <currentMemory>262144</currentMemory>
  <vcpu>1</vcpu>
  <os>
    <type arch='i686' machine='pc-1.0'>hvm</type>
    <kernel>/home/student/iLab/Lab301/HDP119/vmlinuz32</kernel>
    <initrd>/home/student/iLab/Lab301/HDP119/tsc32.gz</initrd>
    <cmdline>ipv4=192.168.122.119:255.255.255.0:192.168.122.1:168.95.1.1 tce=sda1 restore=sda1</cmdline>
    <boot dev='hd'/>
  </os>
  <features>
    <acpi/>
    <apic/>
    <pae/>
  </features>
  <clock offset='utc'/>
  <on_poweroff>destroy</on_poweroff>
  <on_reboot>restart</on_reboot>
  <on_crash>restart</on_crash>
  <devices>
    <emulator>/usr/bin/kvm</emulator>
    <disk type='file' device='disk'>
      <driver name='qemu' type='qcow2'/>
      <source file='/home/student/iLab/Lab301/HDP119/HDP119.qcow'/>
      <target dev='hda' bus='ide'/>
    </disk>
    <interface type='network'>
      <mac address='52:54:00:ef:40:10'/>
      <source network='default'/>
    </interface>
    <serial type='pty'>
      <target port='0'/>
    </serial>
    <console type='pty'>
      <target type='serial' port='0'/>
    </console>
    <graphics type='vnc' port='-1' autoport='yes'/>
    <video>
      <model type='cirrus' vram='9216' heads='1'/>
    </video>
  </devices>
</domain>
}}}

''方法二 : Virtual Machine Manager''

[img[img/bootfromkernel.png]]

''[註]'' 因所使用的內核, 透過核心參數  (tce=sda1 restore=sda1), 可直接掛載虛擬硬碟

{{item1{設定 JDK}}}

''1. 登入系統''
{{{
$ sudo virsh console HDP119
Connected to domain HDP119
Escape character is ^]

Core Linux
box login: tc
Password: 
}}}

''[註]'' 密碼為 student

''2. 檢視掛載硬碟內容''
{{{
$ ls -al /mnt/sda1/
total 40
drwxr-xr-x  6 root root   4096 Nov 27 14:35 .
drwxr-xr-x  4 root root     80 Apr 11 16:02 ..
drwxr-xr-x 15 tc   staff  4096 Nov  3 16:05 hadoop-1.0.4
drwxr-xr-x  8 root root   4096 Nov  2 11:14 jdk1.6.0_37
drwx------  2 root root  16384 Nov  2 02:24 lost+found
-rw-r--r--  1 root root   7746 Nov 27 14:35 mydata.tgz
drwxrwxr-x  4 tc   staff  4096 Nov  2 02:26 tce
}}}

''3. 修改 .ashrc''
{{{
$ nano .ashrc
                             :
alias ping='ping -c 4'

export JAVA_HOME=/mnt/sda1/jdk1.6.0_37
# hadoop 從 1.0 開始是不需要設定 HADOOP_HOME 這個環境變數
# export HADOOP_HOME=/mnt/sda1/hadoop-1.0.4
export PATH="$PATH:$JAVA_HOME/bin"
}}}

''5. 重新開機''
{{{
$ filetool.sh -b

$ sudo reboot
}}}

''[註]''
1. For Ubuntu 10.04 LTS, the ''sun-java6-jdk'' packages have been dropped from the Multiverse section of the Ubuntu archive. It is recommended that you use ''openjdk-6-jdk'' instead.
2. Hadoop 必須安裝 JDK, 否則無法執行 jps 命令, 來查詢啟動那些 Hadoop 服務, 

''6. 再次登入系統''
{{{
Core Linux
box login: tc
Password: 
}}}

''7. 測試 JDK''
是否為 64 位元版 ?
{{{
# java -d64 -version
Running a 64-bit JVM is not supported on this platform.
}}}

是否為 32 位元版 ?
{{{
# java -d32 -version
java version "1.6.0_37"
Java(TM) SE Runtime Environment (build 1.6.0_37-b03)
Java HotSpot(TM) Client VM (build 20.8-b03, mixed mode, sharing)
}}}

<<toBalaNotes "prep">>
{{item1{安裝 Hadoop 核心套件 : HDP119}}}

''1. 登入系統''
{{{
$ sudo virsh console HDP119
Core Linux
box login: tc
Password: 
}}}

''2. 修改 .ashrc''
{{{
$ nano .ashrc
                             :
alias ping='ping -c 4'

export JAVA_HOME=/mnt/sda1/jdk1.6.0_37
# hadoop 從 1.0 開始是不需要設定 HADOOP_HOME 這個環境變數
# export HADOOP_HOME=/mnt/sda1/hadoop-1.0.4
export PATH="$PATH:$JAVA_HOME/bin:/mnt/sda1/hadoop-1.0.4/bin"
}}}
''[註] 下載 Hadoop 套件網址''
{{{
$ wget ftp://ftp.twaren.net/Unix/Web/apache/hadoop/core/hadoop-1.0.4/hadoop-1.0.4-bin.tar.gz
}}}

''3. 重新開機''
{{{
$ filetool.sh -b

$ exit 
}}}

''4. 再次登入系統''
{{{
Core Linux
box login: tc
Password: 
}}}

''5. 檢視 Hadoop 版本''
{{{
$ hadoop version
Hadoop 1.0.4
Subversion https://svn.apache.org/repos/asf/hadoop/common/branches/branch-1.0 -r 1335192
Compiled by hortonfo on Tue May  8 20:31:25 UTC 2012
From source with checksum e6b0c1e23dcf76907c5fecb4b832f3be
}}}

{{item1{Standalone Operation}}}
以下操作在 HDP119 虛擬電腦執行

''1. 資料準備''
{{{
$ cd /mnt/sda1/hadoop-1.0.4/

$ mkdir input

$ cp conf/*.xml input
}}}

''2. 執行 MapReduce 程式''
{{{
$ hadoop jar hadoop-examples-1.0.4.jar grep input output 'dfs[a-z.]+' 
}}}

''[重要]'' 上面命令要執行成功, 必須要先安裝 glibc_apps.tcz 套件, 因此套件中的 getconf 命令會被使用, 如 output 已存在, 必須先刪除
{{{
$ tce-load -wi glibc_apps.tcz
$ rm -rf output/
}}}

''3. 檢視執行結果''
{{{
$ cat output/part-00000 
1	dfsadmin
}}}
<<toBalaNotes "hadoopcore">>
{{item1{Cloudera 釋出 Hadoop 雲端運算套件}}}
Cloudera 官方網址 : http://www.cloudera.com/
本文網址 : http://www.openfoundry.org/en/worldwide-news/2034

在網路業界龍頭 Google、Yahoo、微軟 (Microsoft) 與 Facebook 視為未來主流的雲端運算 (cloud computing) 中,Hadoop 是不可或缺的引擎。背景雄厚的矽谷開放源碼新創公司 Cloudera 日前釋出免費的 Hadoop Linux  散佈套件 Cloudera Distribution for Hadoop。

以協助企業運用 Hadoop 為目標的 Cloudera,基於 Apache Hadoop 最近的穩定 (stable) 版本釋出的 Linux 散佈套件,納入許多從更新版本向後移植 (back-port) 的程式修補,以及該公司為用戶發展的改進功能,可供企業處理大量資料之用。

Cloudera 的 Hadoop 散佈套件將目標放在一般的企業資料中心,打算讓所有企業都能運用大量資料處理能力。Cloudera 希望解決用戶 Hadoop 安裝、組態與支援上的所有需要。

Cloudera 執行長 Mike Olson 指出,起自於 Google 的 MapReduce 和 Google File System 研究論文,Hadoop 是一套儲存並處理 petabytes 等級資訊的雲端運算技術。除了最尖端的 IT 組織,處理這種大量資料不是成本過高就是技術上太過困難。Cloudera 希望將開放源碼社群的力量帶入此一市場。

Cloudera 共同創辦人 Jeff Hammerbacher 表示,Hadoop 提供了提取、儲存和分析前所未見大量資料的能力,然而其門檻過高使企業過去對之卻步。之前用戶可能得請一群博士才能使用 Hadoop,這在醫院或銀行等場所是一大難題。Hadoop 組態設定與管理著實令人頭痛。如今該公司提供簡易許多的方式,讓一般用戶也能親近 Hadoop。Hammerbacher 過去曾在 Facebook 籌組並帶領 Hadoop 資料團隊。

Gartner 副總裁 David Smith 認為,提供 Hadoop 給更多不同的企業,是打開雲端運算潛力的重要工作之一。

Cloudera 創辦人之一也是前 Google Hadoop 叢集經理 Christophe Bisciglia 表示,經過與 Facebook、Google 和 Yahoo 等公司建構大型 Hadoop 部署後,該公司發現到用戶希望大幅簡化 Hadoop 安裝、組態與管理。這成為該公司推出 Hadoop 散佈套件的主要原因。此外,透過提供分享程式碼、經驗與創新的共同平台,散佈套件也能進一步推動社群成長。

Hammerbacher 指出,Cloudera 計畫由軟體周遭服務與支援獲利,而非軟體銷售,該公司和 Hadoop,就像紅帽 (Red Hat) 與 Linux 的關係。

Cloudera 的 Hadoop 散佈套件以 Apache 2 軟體授權釋出供免費下載。企業可以選擇供 Red Hat Linux 使用且預先包裝的 RPM,或是 Amazon EC2 的映像檔。Cloudera 的網路組態工具可以讓企業建立符合自身需求的自定套件。Cloudera 也針對 Linux、Windows 或 Macintosh 平台,提供預先組態好的 VMware 映像檔,供免費下載測試之用。

散佈套件內含使用標準 Linux 系統管理工具進行 Hadoop 組態與部署所需的軟體。Cloudera 的 Hadoop Configurator 可為用戶的叢集產生最佳化的組態設定檔。

Cloudera 共同創辦人還包括 Oracle 前副總裁 Mike Olson、Yahoo 前工程主管 Dr. Amr Awadallah。Cloudera 在由創投公司 Accel Partners 發起的 Series A 籌資過程中已募得 5 百萬美金。

{{item1{Cloudera 採取開放核心模式 Cloudera Enterprise 增加 Hadoop 管理工具}}}
本文網址 : http://www.openfoundry.org/en/worldwide-news/2396

開放源碼分散式資料運算平台 Hadoop 的支援和服務供應商 Cloudera,日前公開第一套付費產品 Cloudera Enterprise,針對其免費散佈平台提供僅供合約訂購的附加元件套件。

Cloudera Enterprise 日前在美國加州舉行的 Hadoop 大會 (Hadoop Summit) 上公開,以若干私有管理、監控、運作工具加強 Hadoop 的功能。收費採取合約訂購方式,價格隨用戶的 Hadoop 叢集大小變動。藉著提供收費的附加軟體來加強開放源碼的 Hadoop 核心,Cloudera 這家各方矚目的矽谷新創公司採取了開放和新的企業模式。

Cloudera 執行長 Mike Olson 表示,該公司進入此一市場即將屆滿兩年,關於用戶如何使用 Hadoop,它的優點在哪裡,讓它難以佈署與運作的缺點又在哪裡,他們了解到許多。

Cloudera Enterprise 增加的額外私有工具包括提供使用者認證與存取控制的 LDAP 目錄伺服器整合,控制與管理進入 Hadoop 資料流的管控界面,以及叢集管理與運作的使用者界面。服務的訂戶也會收到維護更新和支援。

作為此一新企業產品核心的,是該公司的 Cloudera's Distribution for Hadoop (CDH)。2009 年 3 月首次推出的該平台,完全由開放源碼軟體組成,目前已經邁入第 3 版。同時發表的最新 CDH 包含了 Apache Hadoop 和 8 項額外的開放源碼專案。

其中包括 Facebook 開發類似 SQL 的查詢語言 Hive,Yahoo! 發展的低階語言 Pig,Powerset 開發的分散式資料庫 Hbase,Cloudera 自行開發的 MySQL 連接工具 Sqoop,Hadoop 工作流程系統 Oozie,應用於協同分散式服務的 Zookeeper,以及 Cloudera 採用 Apache 授權開放源碼的新專案 Flume 和 Hue。

Flume 是 Cloudera 的資料載入基礎架構,而作為 Hadoop 使用者界面 (Hadoop User Interface) 縮寫的 HUE,則是以網頁為基礎的 Hadoop GUI,也就是過去的 Cloudera Desktop。HUE 提供在 Hadoop 叢集上建立與提交工作的圖形化使用者界面,並可監控叢集狀態、瀏覽儲存資料。

過去 Cloudera 唯一的私有產品是免費的 Cloudera Desktop,如今該軟體已經以 HUE 專案開放源碼。該公司之前提供自己的 Hadoop 散佈套件和多項開放源碼工具,並提供支援、訓練、認證服務。不過 Olson 表示他們一直在計畫增加合約訂購的營收金流。

此一商業模式可歸類於開放核心,在此企業在完全開放源碼的產品上,提供額外工具或功能加以收費。越來越多開放源碼平台廠商採取此一模式,因為該模式不須強迫用戶改變正在使用的開放源碼產品,且允許企業提供用戶願意付費的加值工具。Olson 表示,如果你是平台廠商,你肯定必須提供開放源碼產品。

對於服務供應商,這種模式比銷售私有平台或僅提供服務的平台都更具吸引力。企業必須有可以銷售的產品,這個產品必須有辦法吸引用戶,開放源碼為此提供了一種解答。就 Cloudera 的部份,Olson 表示該公司相信企業需要使用者和群組認證、管理叢集大量資料匯入的視覺化工具、對傳統 IT 人員相當重要的其他管理工具,而且願意為此付費。

這些新工具使得 Cloudera 的雲端更類似傳統的 IT 架構,也更適用於服務供應商領域。換言之,這是建構於企業模式上的企業模式。Cloudera 銷售能讓雲端更像傳統 IT 的服務,服務供應商就能夠進行雲端的批發零售,針對個別項目而非整個雲端來銷售和收費。

在此一類似批發零售的模式下,不難想見 Cloudera 正在建構一個通路,聯合 Acer、AMD、Datameer、Netezza、Talend、Vertica 等合作夥伴,將各自的設備和軟體搭配 Cloudera 產品一同銷售。


///%prep
//%/

///%hadoopcore
//%/
{{item1{檢視 Linux KVM 核心模組 (內建)}}}
KVM requires a linux kernel module to support full virtualization. The linux module consists of three files: ''kvm.ko, kvm_intel.ko (for Intel processors), kvm_amd.ko (for AMD processors)''. You can install these modules just like you install drivers for your video card. The good news is, you may not need to install anything - the 2.6.20 kernel version (and higher) includes these kernel modules as part of the mainline kernel. It will depend on your distribution configuration whether these modules are actually in the distribution kernel as a built-in or provided as modules (or possibly absent). Here is a table listing the relation of KVM module versions to mainline kernel releases:
{{{
- 2.6.20 kvm-12
- 2.6.21 kvm-17
- 2.6.22 kvm-22 
}}}

''檢視命令, 如下 :''
{{{
$ sudo modprobe -l | grep kvm
kernel/arch/x86/kvm/kvm.ko
kernel/arch/x86/kvm/kvm-intel.ko
kernel/arch/x86/kvm/kvm-amd.ko
}}}

If you want to use the latest version of ''KVM kernel modules'' and supporting userspace, you can download the latest version from http://sourceforge.net/project/showfiles.php?group_id=180599. 

{{item1{檢視 kvm.ko 詳細資訊}}}
{{{
$ modinfo kvm
filename:       /lib/modules/3.2.0-59-generic/kernel/arch/x86/kvm/kvm.ko
license:        GPL
author:         Qumranet
srcversion:     E35B1069B8C1647A91264E2
depends:        
intree:         Y
vermagic:       3.2.0-59-generic SMP mod_unload modversions 
parm:           min_timer_period_us:uint
parm:           oos_shadow:bool
parm:           ignore_msrs:bool
parm:           allow_unsafe_assigned_interrupts:Enable device assignment on platforms without interrupt remapping support. (bool)
}}}

{{item1{檢查 CPU 是否有支援 KVM ?}}}
{{{
$ kvm-ok
程式 'kvm-ok' 目前尚未安裝。  您可以由輸入以下內容安裝:
sudo apt-get install cpu-checker

$ sudo apt-get install cpu-checker
正在讀取套件清單... 完成
正在重建相依關係          
正在讀取狀態資料... 完成
下列的額外套件將被安裝:
  msr-tools
下列【新】套件將會被安裝:
  cpu-checker msr-tools
升級 0 個,新安裝 2 個,移除 0 個,有 3 個未被升級。
需要下載 15.6 kB 的套件檔。
此操作完成之後,會多佔用 123 kB 的磁碟空間。
是否繼續進行 [Y/n]?

student@UB1204:~$ sudo kvm-ok
INFO: /dev/kvm does not exist
HINT:   sudo modprobe kvm_intel
INFO: Your CPU supports KVM extensions
KVM acceleration can be used
}}}

''[注意]'' 如 CPU (VT-x,AMD-V) 沒有支援 KVM, 你還是可以安裝 KVM 套件, 只是系統會以 QEMU 來執行虛擬主機 (速度很慢)

{{item1{開始安裝 KVM}}}
{{{
$ kvm
程式 'kvm' 目前尚未安裝。  您可以由輸入以下內容安裝:
sudo apt-get install qemu-kvm

student@UB1204:~$ sudo apt-get install qemu-kvm
正在讀取套件清單... 完成
正在重建相依關係          
正在讀取狀態資料... 完成
下列的額外套件將被安裝:
  bridge-utils kvm-ipxe libaio1 librados2 librbd1 qemu-common qemu-utils
  seabios vgabios
建議套件:
  mol-drivers-macosx openbios-sparc ubuntu-vm-builder uml-utilities
下列【新】套件將會被安裝:
  bridge-utils kvm-ipxe libaio1 librados2 librbd1 qemu-common qemu-kvm
  qemu-utils seabios vgabios
升級 0 個,新安裝 10 個,移除 0 個,有 0 個未被升級。
需要下載 5,326 kB 的套件檔。
此操作完成之後,會多佔用 17.6 MB 的磁碟空間。
是否繼續進行 [Y/n]?y
                            :
                            :
正在設定 bridge-utils (1.5-2ubuntu7) ...
正在設定 libaio1 (0.3.109-2ubuntu1) ...
正在設定 seabios (0.6.2-0ubuntu2.1) ...
正在設定 vgabios (0.6c-2ubuntu3) ...
正在設定 qemu-common (1.0+noroms-0ubuntu14.13) ...
正在設定 librados2 (0.41-1ubuntu2.1) ...
正在設定 librbd1 (0.41-1ubuntu2.1) ...
正在設定 qemu-utils (1.0+noroms-0ubuntu14.13) ...
正在設定 qemu-kvm (1.0+noroms-0ubuntu14.13) ...
qemu-kvm start/running
正在設定 kvm-ipxe (1.0.0+git-3.55f6c88-0ubuntu1) ...
正在進行 libc-bin 的觸發程式 ...
ldconfig deferred processing now taking place
}}}

{{item1{檢視 KVM 系統是否啟動 ?}}}
{{{
$ lsmod | grep kvm
kvm_intel             137721  0 
kvm                   415459  1 kvm_intel

$ kvm --version
QEMU emulator version 1.0 (qemu-kvm-1.0), Copyright (c) 2003-2008 Fabrice Bellard
}}}

{{item1{將 student 帳號加入 kvm 群組}}}
{{{
$ cat /etc/group | grep kvm
kvm:x:126:

$ sudo adduser student kvm
[sudo] password for student:
正將 `student' 使用者新增至 `kvm' 群組 ...
正在將使用者“student”加入到“kvm”群組中
完成。

$ cat /etc/group | grep kvm
kvm:x:126:student
}}}
<<toBalaNotes "1">>


///%kvmsetup
//%/

///%virsh
//%/

///%1
//%/
{{item1{快速建置企業虛擬基礎網路架構}}}
此一企業虛擬基礎網路架構, 主要是使用 FloppyFW 系統來建構


[img[img/ffw.png]]

{{item1{開始建置}}}
''1. 執行 MakeKC.sh''
{{{
$ sudo ./makeKC.sh FloppyFW3.0/

[ 虛擬網路管理 ]
a) 建立  d) 刪除  q) 離開 > a

=> 開始建立虛擬網路

建立 HUB100 虛擬網路 (Hub) ? (y/n) y
啟動 HUB100 虛擬網路完成

建立 HUB88 虛擬網路 (Hub) ? (y/n) y
啟動 HUB88 虛擬網路完成

建立 HUB99 虛擬網路 (Hub) ? (y/n) y
啟動 HUB99 虛擬網路完成

[ 虛擬電腦管理 ]
a) 建立  d) 刪除  q) 離開 > a

=> 開始建立虛擬電腦

建立 ffwNAT 虛擬電腦 ? (y/n) y
複製 ffwNAT.img 檔案 ... 成功
建立 ffwNAT 虛擬電腦完成

建立 ffwR1 虛擬電腦 ? (y/n) y
複製 ffwR1.img 檔案 ... 成功
建立 ffwR1 虛擬電腦完成

建立 ffwR2 虛擬電腦 ? (y/n) y
複製 ffwR2.img 檔案 ... 成功
建立 ffwR2 虛擬電腦完成

建立 ffwS1 虛擬電腦 ? (y/n) y
複製 ffwS1.img 檔案 ... 成功
建立 ffwS1 虛擬電腦完成

建立 ffwS2 虛擬電腦 ? (y/n) y
複製 ffwS2.img 檔案 ... 成功
建立 ffwS2 虛擬電腦完成
}}}

''2. 啟動 FloppyFW 虛擬系統''
{{{
$ virsh start ffwR1
區域 ffwR1 已開啟
}}}

''3. 登入 FloppyFW 虛擬系統''
{{{
$ virsh console ffwR1
Connected to domain ffwR1
Escape character is ^]
22:15:34.501: error : vshRunConsole:77 : unable to open tty /dev/pts/1: 拒絕不符權限的操作


student@KVMFS:~/KVMLab/MakeKC$ sudo virsh console ffwR1
Connected to domain ffwR1
Escape character is ^]

                        -=[ FloppyFW 3.0.14 ]=-

                            Thomasez@zelow.no
                      http://www.zelow.no/floppyfw/
ffwR1 login: root


BusyBox v1.11.2 (2010-11-28 05:17:40 MST) built-in shell (ash)
Enter 'help' for a list of built-in commands.

# 
}}}

''4. 離開 FloppyFW 虛擬系統''

請按 Ctrl + ] 組合鍵

''5. 停止 FloppyFW 虛擬系統''
{{{
$ virsh destroy ffwR1
區域 ffwR1 已經刪除
}}}

<<toBalaNotes "1">>

{{item1{快速移除}}}
{{{
$ sudo ./makeKC.sh FloppyFW3.0/


[ 虛擬網路管理 ]
a) 建立  d) 刪除  q) 離開 > d

=> 開始刪除虛擬網路

刪除 HUB100 虛擬網路 (Hub) ? (y/n) y
刪除 HUB100 虛擬網路完成

刪除 HUB88 虛擬網路 (Hub) ? (y/n) y
刪除 HUB88 虛擬網路完成

刪除 HUB99 虛擬網路 (Hub) ? (y/n) y
刪除 HUB99 虛擬網路完成

[ 虛擬電腦管理 ]
a) 建立  d) 刪除  q) 離開 > d

=> 開始刪除虛擬電腦

刪除 ffwNAT 虛擬電腦 ? (y/n) y
刪除 ffwNAT 虛擬電腦成功
刪除 ffwNAT 虛擬電腦硬碟檔 ? (y/n) y
刪除 ffwNAT.img 虛擬電腦硬碟檔成功

刪除 ffwR1 虛擬電腦 ? (y/n) y
刪除 ffwR1 虛擬電腦成功
刪除 ffwR1 虛擬電腦硬碟檔 ? (y/n) y
刪除 ffwR1.img 虛擬電腦硬碟檔成功

刪除 ffwR2 虛擬電腦 ? (y/n) y
刪除 ffwR2 虛擬電腦成功
刪除 ffwR2 虛擬電腦硬碟檔 ? (y/n) y
刪除 ffwR2.img 虛擬電腦硬碟檔成功

刪除 ffwS1 虛擬電腦 ? (y/n) y
刪除 ffwS1 虛擬電腦成功
刪除 ffwS1 虛擬電腦硬碟檔 ? (y/n) y
刪除 ffwS1.img 虛擬電腦硬碟檔成功

刪除 ffwS2 虛擬電腦 ? (y/n) y
刪除 ffwS2 虛擬電腦成功
刪除 ffwS2 虛擬電腦硬碟檔 ? (y/n) y
刪除 ffwS2.img 虛擬電腦硬碟檔成功
}}}

<<toBalaNotes "2">>

///%1
//%/

///%2
//%/
{{item1{實例 1}}}
{{{
> Hello,
> has anyone a Linux installed PowerEdge R210 Server running ?
> i am not sure if Linux KVM (kernel virtual mashine) will work on this
> Server.
> or has anyone allready Linux KVM up and running ?
> 
> i need the output of this : ""egrep '^flags.*(vmx|svm)' /proc/cpuinfo""
> 
> thanks
}}}
Works wonderful.
I've switched most machines to from VMWare to KVM now.
I am running Gentoo on top of a PowerEdge R710 with 11 VMs on it and the 
average load is 0.30 :)

[[Dell PowerEdge R710 (不到十萬)|http://www1.ap.dell.com/tw/zh/business/Servers/server-poweredge-r710/pd.aspx?refid=server-poweredge-r710&cs=twbsd1&s=bsd]]

{{item1{實例 2}}}
Ubuntu Server 10.04 - 64 位元系統, 安裝在以下的主機

''主戰機的硬體規格''
{{{
cpu:
  Intel(R) Core(TM)2 Duo CPU     E8400  @ 3.00GHz, 2003 MHz

network:
  eth1                 Intel 82567LM-3 Gigabit Network Connection (沒有啟動)
  eth0                 Realtek RT8139

disk:
  /dev/sda             Hitachi HDT72101 

Memory Size: 3 GB + 768 MB
}}}

可同時啟動 4 部 Windows Server 2003 (記憶體 512 M), 2 部 Ubuntu Desktop 10.04 (記憶體 512 M), 遠端使用 VNC 連接操作, 畫面會有一點頓, 但還能接受

<<toBalaNotes "fire">>


///%fire
//%/
''參考文章''
1. 虛擬主機 XML 設定檔說明
http://libvirt.org/format.html
2. 如何變更遠端桌面聽候的連接埠 (Windows XP/2003)
http://support.microsoft.com/kb/306759

{{item1{自動佈署程式 - 範例 1}}}
{{{
#!/bin/bash
# Auto-deploy the 172.30.100.0 lab: define and start the host-only virtual
# network, then define and start the NAT, RD and WK virtual machines.
# Must be run from /root/172.30.100.0 so the relative resources resolve.

d=$(pwd)
# Fix: the original message said "need to be a root", but this test checks
# the working directory, not the user — report the actual requirement.
[ "$d" != "/root/172.30.100.0" ] && echo "please change to /root/172.30.100.0" && exit 1

# Virtual network: define from XML, start it now, and autostart on boot.
virsh net-define /root/172.30.100.0/172-30-100-Host-Only.xml
virsh net-start 172-30-100-Host-Only
virsh net-autostart 172-30-100-Host-Only

# Virtual machines: define each domain from its XML file, then start it.
virsh define /root/172.30.100.0/172-30-100-US104-NAT.xml
virsh start 172-30-100-US104-NAT

virsh define /root/172.30.100.0/172-30-100-US104-RD.xml
virsh start 172-30-100-US104-RD

virsh define /root/172.30.100.0/172-30-100-UD104-WK.xml
virsh start 172-30-100-UD104-WK

}}}

<<toBalaNotes "1">>

{{item1{自動佈署程式 - 範例 2}}}
{{{
clear
# Toggle a virtual machine definition: if the named VM is already known to
# libvirt it is undefined; otherwise it is defined from Lab101/<name>.xml.
read -p "Enter VM name : " vm

# Guard: an empty name would give grep an empty pattern that matches every
# line, causing a spurious undefine attempt.
[ -z "$vm" ] && echo "NG" && exit 1

vl=$(virsh list --all)
# Quote expansions so names containing spaces or glob characters stay intact;
# grep -q replaces the &>/dev/null redirection, and -- protects names that
# begin with a dash.
echo "$vl" | grep -q -- "$vm"
[ "$?" == "0" ] && virsh undefine "$vm" &>/dev/null && echo "ok" && exit 0

virsh define Lab101/"${vm}".xml &>/dev/null
[ "$?" == "0" ] && echo "create ok" && exit 0

echo "NG"
exit 1
}}}

<<toBalaNotes "2">>

{{item1{自動佈署程式 - 範例 3}}}
{{{
#!/bin/bash
# Deploy or tear down the 172.30.66.0 lab environment: three virtual machines
# plus one host-only virtual network. Running the script when the resources
# exist removes them (interactively); running it when they are absent creates
# them. Must be executed as root from /root/172.30.66.0.

d=$(whoami)
[ "$d" != "root" ] && echo "need to be a root" && exit 1

d=$(pwd)
[ "$d" != "/root/172.30.66.0" ] && echo "please change to /root/172.30.66.0" && exit 1

clear

#
# Undefine Virtual Machine
#
declare -a vmname
vmname[0]="172-30-66-US104-DM"
vmname[1]="172-30-66-US104-Router"
vmname[2]="172-30-66-W2K3"

vmlist=$(virsh list --all)
flag=0
index=0
# Iterate over the whole array instead of a hard-coded count of 3, so adding
# a VM only requires appending to vmname above.
while [ $index -lt ${#vmname[@]} ]
do 
  echo $vmlist | grep "${vmname[$index]}" &> /dev/null
  if [ "$?" == "0" ]; then 
     echo ""
     read -p "undefine ${vmname[$index]} ? (y/n) " ans
     if [ "$ans" == "y" ]; then
        # undefine fails while the domain is still running — tell the user
        # to stop it first rather than continuing with a partial teardown.
        virsh undefine ${vmname[$index]} &> /dev/null 
        [ "$?" != "0" ] && echo "please stop ${vmname[$index]}" && exit 1
        echo "undefine ${vmname[$index]} ok"  
        flag=1
     else
        echo "bye" && exit 1  
     fi
  fi
  let "index=$index+1"
done

#
# Define / Undefine Virtual Network
#
netname="172-30-66-Host-Only"

virsh net-list | grep "$netname" &> /dev/null
if [ "$?" != "0" ]; then
   # Network absent: define it from XML, start it, and enable autostart.
   echo ""
   read -p "define virtual network $netname ? (y/n) " ans
   if [ "$ans" == "y" ]; then
      virsh net-define /root/172.30.66.0/"$netname".xml &> /dev/null
      virsh net-start "$netname" &> /dev/null 
      virsh net-autostart "$netname" &> /dev/null
      echo "$netname start"
   fi
else
   # Network present: tear it down (destroy stops it, undefine removes it).
   echo ""
   read -p "undefine virtual network $netname ? (y/n) " ans
   if [ "$ans" == "y" ]; then
      virsh net-destroy "$netname" &> /dev/null
      virsh net-undefine "$netname" &> /dev/null
      echo "undefine $netname ok"
      flag=1
   fi
fi

# flag=1 means this run performed a teardown — skip the (re)definition step.
[ "$flag" == "1" ] && exit 0
#
# Define Virtual Machine 
#
echo ""
read -p "define ${vmname[0]}, ${vmname[1]}, ${vmname[2]} ? (y/n) " ans
if [ "$ans" == "y" ]; then
   index=0
   while [ $index -lt ${#vmname[@]} ]
   do 
     virsh define /root/172.30.66.0/"${vmname[$index]}".xml &> /dev/null
     # virsh start "${vmname[$index]}" &> /dev/null
     [ "$?" == "0" ] && echo "define ${vmname[$index]} ok"

     let "index=$index+1"
   done
fi 

exit 0
}}}

<<toBalaNotes "3">>


///%1
//%/

///%2
//%/

///%3
//%/
''參考文章''
1. 初探虛擬交換器 (必讀)
http://www.ithome.com.tw/itadm/article.php?c=62284&s=1
2. 如何架設VMware分散式虛擬交換器 
http://www.ithome.com.tw/itadm/article.php?c=61046
3. 認識邊緣網路架構 VEB、VN-link、VEPA 技術介紹 (一定要看)
http://www.netadmin.com.tw/article_content.aspx?sn=1112070005

{{item1{認識 虛擬交換器}}}
虛擬交換器是由 Hypervisor 模擬出來的軟體物件,主要提供虛擬機器橋接實體網路,及相互連線。隨著虛擬化技術的廣為使用,一些問題也隨之產生,像是虛擬交換器的網路設定,不能隨著虛擬機器的線上移轉而移動,因此 VMware 開發了新的分散式虛擬交換器,可以解決以上的問題,並將技術開放給 Cisco,開發第 3 方的虛擬交換器軟體 ''Nexus 1000V'',提供更強的網路功能,同時,新的技術標準也正待 IEEE 通過,虛擬網路的流量交換,未來可望改由實體的交換器負責。 

{{item1{虛擬交換器的類型}}}
一般而言,虛擬交換器可以分為 ''External'' 及 ''Private'' 等 2 種類型。

''External 交換器''可以將虛擬平臺伺服器的實體網卡指派為 Uplink 埠使用,提供虛擬機器連接實體網路,不僅如此,許多的虛擬交換器還支援業界共通的 802.3ad 標準,當 Uplink 埠的數量大於 1  組時,我們可以透過這項功能,將它們集結成為群組,讓虛擬與實體交換器之間的傳輸頻寬得以倍增。

相對於前者,''Private 交換器''則不具備 Uplink 埠,因此與實體環境隔離,形成全然封閉的網路環境,這種虛擬交換器,一般是提供給企業架設不需要連接實體網路的測試平臺。

比較特別的是,微軟的 Hyper-V 除了 External、Private 等 2 種虛擬交換器外,還提供了一種 Internal 交換器,它和 Private 交換器的唯一差別之處,位於 Internal 交換器上的虛擬機器,可以連接 Hyper-V 的 Host OS,也就是底層的 Windows Server 2008,藉此增加虛擬交換器的使用彈性。

交換器在企業內部,屬於隨處可見的網路基礎設施,當需要連線的電腦太多,造成交換器的網路埠數量不敷使用時,通常會以串接其他交換器的方式做為擴充,反觀虛擬交換器,因為本身擁有為數可觀的虛擬網路埠,而沒有此一問題。

像是 VMware 的 vSphere,及它的精簡版本 ESXi,一臺虛擬交換器最多可以提供 1,016 臺虛擬機器連接網路,所以 2 臺虛擬交換器之間,通常不會相互連線。

除非是基於安全上的需求,而將一臺具備防火牆,或者是 IPS 等防護功能的虛擬設備(Virtual Appliance),放置在 External 交換器,及虛擬機器所在的 Private 交換器中間,提供流量檢測的功能。例如 iThome 在 2008 年曾經測試過的 Reflex System VSA,就是該類型防護產品當中的其中一種。 

''@@color:red;VMware Workstation 網路架構 : Bridge (External 虛擬交換器), NAT 及 Host Only (Private or Internal 虛擬交換器外)@@''

{{item1{具備防護虛擬機器的功能}}}
虛擬交換器除了提供網路流量的交換服務外,也和虛擬機器的安全防護息息相關。

最為常見的做法,就是透過交換器,將內部網路切割為多個 VLAN,由於每個 VLAN 都是各自分開的廣播網域,如此一來,當網路型的病毒在內部爆發時,就不能任意透過廣播的方式擴散到所有電腦。

另外,像是 VMware 虛擬交換器的 PortGroup,也可以算是一種類似的應用,我們可以根據虛擬機器的性質,而對應到不同的 PortGroup,並各自套用不同的交換器設定,而各個 PortGroup,還可以指派不同的 VLAN id,便於企業能彈性管理虛擬網路。

透過 VMware 虛擬交換器的設定介面,我們最多可以在一臺虛擬交換器設定 512 個 PortGroup,而每個 PortGroup 可以包含1到多個不等的虛擬網路埠。以每臺 vSphere(含舊版ESX)/ESXi版本的虛擬平臺伺服器來說,其 PortGroup 最大值為 4,096 個,和虛擬網路埠的最大值相等。

''VMsafe'' 也和虛擬交換器有關,它是 VMware 在 vSphere 及 ESXi 4.0 的新版伺服器虛擬化平臺,所提供的一種防護技術,其做法是透過一臺裝有防護產品的虛擬設備,替代原本安裝在各臺虛擬機器上的主機型防護軟體,達到節省硬體資源使用的目的。 

{{item1{虛擬機器的線上移轉}}}
而虛擬機器的線上移轉,是伺服器虛擬化之後,最重要的一項功能應用,當底層的虛擬平臺伺服器需要停機維修,或者負載過高時,我們可以在虛擬機器保持服務的前提下,將其轉移到另外一臺虛擬平臺伺服器繼續運作,除了 VMware 的 VMotion 外,像是微軟的 Hyper-V,及 Citrix 的 XenServer 等伺服器虛擬化套件,都有相同的移轉功能。

對於 IT 人員來說,這類型的移轉功能,雖能保證虛擬機器和實體網路之間的連線不會中斷,然而,位於原本虛擬交換器上的網路設定,並不會隨之移動,必須在移轉過去的另外一臺虛擬平臺伺服器,以手動方式設定。

為了解決這個管理上的難題,VMware 後來決定正式在 vSphere 及 ESXi 4.0 的版本,額外提供了分散式虛擬交換器(vNetwork Distributed Switch,vDS)的新功能,使得網路設定,終於可以隨著虛擬機器的線上移轉而同時移動。 
<<toBalaNotes "1">>

{{item1{IEEE 802.1Qbg 標準}}}
本文網址 : http://www.ithome.com.tw/itadm/article.php?c=61963

''重 點''
*802.1Qbg 標準將在 6~9 個月間通過,改變現有虛擬環境網路架構
*交換器廠商 Extreme 於 4 月成功展示此新標準的技術應用

虛擬化大幅改變了機房與 IT 應用的面貌,為了配合虛擬化的應用,網路設備廠商推動的 IEEE 802.1Qbg 標準,預計將在 6~9 個月內通過,這將會讓交換器可以取代現在虛擬化平臺中虛擬交換器(Virtual Switch,vSwitch)的功能,替未來虛擬環境中的網路架構帶來極大變革。在今年 4 月於拉斯維加斯舉辦的 Interop 展上,交換器廠商 Extreme 成功在 KVM 的虛擬化平臺上,做到了新標準的功能展示,未來當標準通過,所有的交換器廠商都能夠用類似的方法,與不同的虛擬化平臺合作。

''能滿足虛擬機器搬移的 SLA 設定需求,也能強化安全''
未來符合此一標準的交換器設備,將有能力直接做到現在虛擬交換器的功能,透過 ''Virtual Ethernet Port Aggregator(VEPA)''這一新功能的協助,虛擬機器的流量,未來將能夠直接將流量導到實體交換器,由實體交換器直接處理這些流量,然後再回傳至要溝通的虛擬機器上。

新標準的作法隨之而來也會帶來一些新的優點,比如說,因為可以不再使用虛擬交換器,管理難度將大幅減低。特別是進行類似VMotion 的虛擬機器搬移功能時,由於在新標準的作法中,虛擬機器的溝通都透過實體交換器,所以交換器將有能力掌握 SLA 設定的資訊,例如 QoS 等等,所以當虛擬機器搬移到另一臺實體機器時,這些資訊能夠透過交換器自動傳輸給對應的另一臺交換器,然後做到自動的 SLA 設定,減少現在手動設定的問題,管理上的問題也能紓解。

在現在的狀況下,虛擬機器搬移到另一臺實體伺服器後,因為往往連接的不是同一臺交換器,此時,網路相關的 SLA 設定並不會跟著過去,兩臺交換器間也沒有辦法辨識出這是同一臺虛擬機器,所以在IT部門中,管理網路的IT人員,必須去找管理虛擬交換器的伺服器管理人員,共同設定,造成管理流程上非常複雜。

此外,安全上也能加強。所以針對 DDoS 此類的攻擊,未來實體交換器將有辦法針對同一臺實體機器中,不同的虛擬機器分別阻斷流量。

''VEPA 和 802.1Q-2005 的解禁,是標準上的最大突破''
802.1Qbg 標準的突破,最大的意義在於 VEPA 的建立,以及 802.1Q-2005 標準的解禁。最早發明虛擬化平臺的設計者,因為無法解決交換器無法認得同一臺伺服器中虛擬機器的問題,於是透過軟體設計了虛擬交換器,提供基本的第二層交換器功能,做到同一臺實體機器上,虛擬機器間的互連。

而隨著虛擬化的應用越來越廣,虛擬機器跨實體機器的搬移,以及虛擬交換器數量增加,都造成了管理上的困難。舉例來說,4月成功展出 802.1Qbg 新功能的 Extreme 資料中心解決方案總監 Kevin Ryan,就在來臺記者會上表示,如果把虛擬交換器都計算進去,在12個42U高的機櫃,放滿了576刀的刀鋒伺服器這樣的環境下,如果把實體和虛擬的交換器都計算進去,使用者就需要808臺交換器。網路設備的管理上,也從傳統的3層式網路架構,多了兩層由伺服器管理人員負責的虛擬交換器和刀鋒機箱交換器模組,管理難度增加很多。

但是,Kevin Ryan 指出,如果改採用支援新標準的交換器來做同樣環境的網路架構,交換器總數就減少到了僅需要4臺,而且使用者的IT單位,也只要由管理網路的IT人員就能全權負責處理。

在現在的虛擬化環境中,每臺虛擬機器建立的時候,都會連帶建立虛擬網卡(vNIC)、虛擬連接介面,與串連這些虛擬機器的虛擬交換器。然後每個 vNIC 出來的流量,透過虛擬交換器轉接到實體網路卡(NIC)的對外連接埠上,與實體交換器的連接埠相連。每臺虛擬機器有可能共享不同的實體網卡,也因此任兩臺虛擬機器要溝通的時候,現在的架構是允許不用透過實體交換器。

而新的 VEPA 則取代了虛擬交換器的角色,限定虛擬機器的流量不能直接透過 VEPA 去轉接,而是必須先由實體網卡出去,傳輸到實體交換器,由實體交換器處理了封包轉送的工作,再傳回去。雖然原理還是相同,仍然是透過同一張實體網卡共享的 vNIC 來做到虛擬機器間的互通,不過因為透過實體交換器處理過去虛擬交換器的工作,所以伺服器的運算資源不會被耗用,也能做到前述的優點,如 SLA 設定隨著虛擬機器搬移而移動、安全阻斷流量等等。

另一個重要的突破,則是 802.1Q-2005 標準在前述 VEPA 這樣應用上的的解禁。由於過去 802.1Q-2005 標準規定,當實體交換器的一個連接埠在進行封包轉送的時候,除了這個連接埠以外的其他連接埠,是潛在的資料傳送埠。也就是說,在這樣的標準規定下,虛擬機器資料是不能透過同一個實體交換器的連接埠,同時進行資料的發送與接收。這就使得上述 VEPA 的應用,成為不可能。因為同一臺實體伺服器上的虛擬機器,是共用同一個交換器的同一個連接埠。但是在這次的 ''802.1Qbg'' 標準中,這樣的規定被解禁了,VEPA 成為 802.1Q-2005 標準的例外,允許交換器能夠在和 VEPA 溝通的時候,同時以單埠傳輸與接收資料。這樣的做法,也才讓 ''802.1Qbg'' 的構想可行。文⊙劉哲銘 

[img[img/kvm/vswitch.jpg]]

[img[img/kvm/vswitch1.jpg]]

<<toBalaNotes "qbg">>


///%qbg
//%/

///%1
//%/
<<forEachTiddler 
 where
 'tiddler.tags.length == 0'
>>
''參考文章''
1. 新一代的企業個人電腦 - 虛擬化桌面
http://www.sysage.com.tw/guest/GoGoBuyOne.aspx?id=21

『史上最大電腦換機潮,未來 2 年達 2.7 億台』這是去年 10 月第 1142 期商業周刊的封面主題。封面文章敘述著千禧年之後,全球的 PC 市場因為持續經歷了網路泡沫、 SARS  危機、以及史上最大金融海嘯的問題,所以一直沒有將真正的潛力爆發出來。而最強的換機潮終於要在 2010~2011 年正式登場,金融海嘯後的經濟復甦,以及微軟   Windows  7  的推出正是這波PC  大幅汰舊換新的主要原因。

根據  IDC  的預估, Windows 7  在 2010 年將佔有企業購買微軟作業系統的 49.5% ,相當於高達 5 千 8 百萬份的出貨量。而許多既有的個人電腦 (PC) 硬體勢必也將因為規格老舊,無法配合諸如螢幕觸控、多媒體應用等   Windows  7  的嶄新功能而需要被汰舊換新,因此造就一波換機潮。商業周刊提到這波換機潮的主要新興個人終端電腦設備產品包括小筆電、以及嘗試取代個人電腦的  AIO( 一體成型電腦 ) 等等。

''Apple. 推出 .iPad. 平板電腦,搶佔個人終端設備市場''

不過,資訊產業的特色就是快速多變,預估與規劃總是趕不上實際的變化。今年 4 月份起才逐步在全球各地銷售的蘋果 (Apple) 公司全新終端設備— iPad  平板電腦,在美國市場一推出便造成熱銷,而將來勢必也會在個人終端設備市場中佔領重要的一席地位,並可能改變商業周刊去年 10 月封面故事對個人電腦的銷售預估數字。

iPad  可說是   iPhone  的放大版,重量僅 0.7 公斤而擁有 9.7 吋的觸控螢幕,並傳承了  Apple  產品一貫的強大功能及酷炫外型;其定位介於電腦與智慧型手機之間,強調更適合處理上網、收發郵件、觀賞多媒體資料、及閱讀電子書等等工作。 2001 年以來累計達 2 億 5 千萬台   iPod ,以及光是 2009 年就有 2 千 7 百萬台iPhone  的銷售亮眼成績便是大家預期   iPad  也會成功的最有力依據,功能多樣又先進的  iPad  想必讓許多人都會想擁有它。

但是外出時到底要帶筆記型電腦出門,還是帶  iPad出門呢?帶筆記型電腦出門怕不夠炫,帶  iPad 出門又怕因為不是微軟  Windows 作業平台而無法處理公事,真是傷腦筋!難道要當雙槍俠,隨時帶著兩台設備出門嗎?那也太累了吧!要是有個兩全齊美的方法,能將  Windows  7 放在 iPad 上執行,讓比爾蓋茲 (Bill Gates) 和史帝夫賈伯斯 (Steve Jobs) 兩大資訊業界奇才的最新創作整合起來同時為你服務,那就太帥了。

''桌面虛擬化實現夢想''

感謝雲端運算及虛擬化技術的快速發展,兩全齊美的方法其實已經存在了。虛擬化技術的發展非常快速,在完成伺服器及應用程式的虛擬化之後,已經將觸角延伸到了個人電腦的桌面,讓用戶端的作業系統及應用軟體能夠與硬體設備脫離,不需要再永遠被綁在一起了。所以,具備員工個人電腦桌面虛擬化(Desktop Virtualization)  能力的企業資訊中心,便能將員工上班所使用的電腦作業平台、應用程式、及個人化設定包裹起來在機房執行,再透過網路將結果傳送到員工手上諸如  iPad 、智慧型手機等設備。


這樣,員工隨時隨地可以感覺好像身在辦公室一樣的處理公事,電腦畫面及使用習慣都不需有所更動,而公司則可獲得員工更多的產值,真是兩全齊美啊。這種新一代的桌面虛擬化架構讓專家也都很看好其未來的發展,例如   IDC 便預估 2010 年雲端服務將漸趨成熟,桌面虛擬化的普及度更將提升至 34% 。而   Gartner  在 2009 年 12 月發表的“ 2010 年用戶終端  PC  預估 (Predicts 2010: PC End-User  Issues) ”文章中也預期在 2012 年的時候,60% 的全球企業  PC 將使用桌面虛擬化的技術。

''桌面虛擬化:員工、企業、資訊人員 3 贏的架構''
    
目前,伺服器及應用程式虛擬化架構是將機房中企業及員工所需要用到的伺服器作業系統及如 ERP、CRM 等軟體程式自伺服器硬體中脫離出來,並且包裹起來成為所謂的 “Bubble  ( 泡泡 )” 形式在共用的硬體平台上執行,如果硬體有問題或需升級時,“Bubble” 的軟體包便很容易地隨時搬移到另一個硬體上執行,省時、省事又安全。

而企業個人電腦桌面虛擬化的運作便是延續伺服器及應用程式虛擬化的架構而來的。但這次 “Bubble” 中要包裹的則是員工   PC  的作業系統、個人桌面設定、以及如郵件收發、文書處理等應用程式。企業機房中需要添購專門執行桌面虛擬化的硬體伺服器,在收到員工的連線需求之後,便以 “On-demand” 的方式立即將該員工所需的 “Bubble” 軟體包裹、執行,然後快速地將結果畫面透過區域或廣域網路傳送到員工的終端設備上,來達到桌面虛擬化的功能。

<<toBalaNotes "vdesktop">>


///%vdesktop
//%/
無線電源聯盟在上週六公佈第一個標準並取名為「Qi」(氣),可以將需要充電的設備放置在一個平板上充電,最高功率為五瓦。該聯盟並將開始發展下一個標準,最高功率為120瓦,足以對筆記型電腦等中度耗電設備充電或提供電源。

目前「Qi」規範為五瓦電力,主要是針對手機、藍牙耳機等設備充電。充電的平板可以嵌入桌面,將待充電設備放在平板上即可充電,類似前Palm公司所推出的手機無線充電裝置。勁量將推出採用此標準的充電器,可以一次對兩台智慧手機充電,主體售價約為100美元,轉接到iPhone、黑莓機的器材售價約在 30-40美元。

無線電源聯盟耗費18個月才確定這個標準,會員以外的公司要到八月底才能取得標準內容,產品需經過獨立實驗室驗證以取得「Qi」的標章。目前主要的手機廠商如Nokia、HTC等均已經加入會員。

該聯盟並未針對「隔空」無線充電提出標準,主要因為效率太低,其次是大眾對強力磁場還是有所忌諱。接收器直接接觸磁場的模式效率較高,也減少磁場對其他物品的影響。

由於無線充電/電源技術日漸成熟,為避免設備只能使用特定廠商的技術或產品,讓各家產品無法通用,所以該聯盟推出標準,以利推廣無線電源。iSuppli在六月底曾經預估今年無線電力的市場規模約為360萬美元,到2014年則可能成長至2.349億美元。(編譯/沈經)
{{item1{虛擬主機的啟動}}}

''方法 1 :  VMM 啟動虛擬主機 (圖形介面)''

[img[img/kvm/vmmstart.png]]

''方法 2 : virsh 啟動虛擬主機 (命令模式)''
{{{
$ virsh start US1041-NAT
}}}

使用 Virtual Machine Manager 或 virsh 啟動虛擬主機, 事實上是在 Linux 系統中執行 ''kvm'' 命令, 如下 : 
{{{
$ /usr/bin/kvm -S -M pc-0.12 -enable-kvm -m 256 -smp 1 -name 172-30-100-US104-RD -uuid 74dd5321-20a2-3742-500b-3036fc8a6f85 -chardev socket,id=monitor,path=/var/lib/libvirt/qemu/172-30-100-US104-RD.monitor,server,nowait -monitor chardev:monitor -boot c -drive file=/root/172.30.100.0/US104_100_RD.img,if=virtio,index=0,boot=on -net nic,macaddr=52:54:00:30:64:20,vlan=0,model=virtio,name=virtio.0 -net tap,fd=38,vlan=0,name=tap.0 -chardev pty,id=serial0 -serial chardev:serial0 -parallel none -usb -vnc 0.0.0.0:1 -k en-us -vga cirrus
}}}

{{item1{虛擬主機的關閉}}}

''方法 1 : VMM 關閉虛擬主機 (圖形介面)''

[img[img/kvm/vmmstop.png]]

''方法 2 : 關閉虛擬主機 (virsh 命令)''
{{{
$ virsh shutdown US1041-NAT  (必須虛擬系統能處理關機命令)
}}}

''方法 3 : 強迫關閉虛擬主機 (virsh 命令)''
{{{
$ virsh destroy US1041-NAT
}}}

<<toBalaNotes "1">>

{{item1{資源使用狀態 : virt-top}}}
網址 : http://people.redhat.com/~rjones/virt-top/index.html

virt-top is a top-like utility for showing stats of virtualized domains. Many keys and command line options are the same as for ordinary top.

It uses libvirt so it capable of showing stats across a variety of different virtualization systems. 

[img[img/kvm/virttop.png]]

{{item1{虛擬主機快照}}}

''1. Save a guest''
Save the current state of a guest to a file using the virsh command:
{{{
# virsh save US104 US104.save
}}}
''[註]'' 執行上面命令, 必須啟動虛擬主機

This stops the guest you specify and saves the data to a file, which may take some time given the amount of memory in use by your guest. You can restore the state of the guest with the restore (Restore a guest) option. Save is similar to pause, instead of just pausing a guest the present state of the guest is saved.

''2. Restore a guest''
Restore a guest previously saved with the virsh save command (Save a guest) using virsh:
{{{
# virsh restore US104.save
}}}
This restarts the saved guest, which may take some time. The guest's name and UUID are preserved but are allocated for a new id. 

<<toBalaNotes "2">>


///%1
//%/

///%2
//%/

///%3
//%/
{{item1{設定鍵盤格式}}}
請選擇美式鍵盤, 這樣在終端機模式下, 才能正常操作

[img[img/android/ax40kb.png]]

<<toBalaNotes "1">>

{{item1{取消休眠功能}}}

[img[img/android/ax40sleep.png]]

<<toBalaNotes "2">>

{{item1{設定 TCP/IP 網路}}}

目前 4.0 RC2 版本有提供 DHCP Client 功能, 可是 DNS 無法被設定

''1. 選擇 Terminal Emulator''

[img[img/android/ax40term.png]]

''2. 登入為 root 帳號''

[img[img/android/ax40su.png]]

''3. 檢視 TCP/IP 設定''

[img[img/android/ax40tcpip.png]]

''4. 手動設定 DNS IP''

[img[img/android/ax40dns.png]]

{{item1{手動設定 TCP/IP}}}
{{{
$ su -

$ cd etc

$ vi init.sh
                 :
ifconfig eth0 192.168.122.77 netmask 255.255.255.0
route add default gw 192.168.122.1 dev eth0
route add -net 192.168.99.0 netmask 255.255.255.0 gw 192.168.122.99 dev eth0
setprop net.dns1 168.95.1.1
}}}

''設定畫面如下 : ''

[img[img/android/ax40nodhcp.png]]

''[註]'' alt+f1 直接進入終端機, alt+f7 離開終端機, 然後再按 ESC 鍵

<<toBalaNotes "3">>

///%1
//%/

///%2
//%/

///%3
//%/
※本文內容取自台南市長安國小校長/陳聖謨的『小學生的學習檔案』一文
來源網站網址:http://sparc2.nhltc.edu.tw/~publish/31homepage/17.htm

傳統的紙筆測驗雖然具有簡便速效的優點,然而許多教育人員也大多相信測驗分數並不能代表學生真正的學習表現,也無法瞭解學生在學習過程中的進步情形。例如最典型的標準化成就測驗,就招致了:不能激勵學生學習、預測學生的學習發展效果不佳、窄化了所學的範圍、僅能評定表面的知識等等批評。

因此在九十年代之後,真實性評量 (authentic assessment) 與實作性評量 (performance assessment) 的理念相繼崛起,其中最受到矚目的要算是以檔案形式來顯現學生的學習表現,因為檔案可以激發學生主動學習的動機,並展現學生在各方面學習的進步情形,在美國,使用學生的學習檔案,已成為教學與評鑑的重要趨勢。

學生所建構的學習檔案是指學生在某一期間當中,各種學習的代表作品或其所展現的能力,故檔案是學生在某個領域的學習過程與學習成就所彙編選錄而成的專輯。其內容包含了學生的寫作品,研究報告,閱讀過書目表,口說閱讀的錄音帶,學生的自我評鑑,檢核表;插畫,手工作品代表,平時測驗成績,教師平時觀察評語,父母評語,閱讀心得,對檔案內容的反省評論及檔案內容的次序一覽表等等。從檔案內容,可以看出學生的努力、進步與成就的情形。

使用學習檔案,可結合學生在學習過程的學習記錄與成果的展現,等於兼具了教學與評鑑的效果。

    * 就教師而言,教師可透過學生學習檔案的檢閱,可以全面瞭解學生的學習狀況,並作為個別學習輔導、親師溝通或學年成績通知之用。
    * 就學生而言,可使學生的建構檔案的過程當中,自我監控、自我激勵、交互切磋,有益於其良好學習態度的養成,並獲得有效的學習策略。

雖然檔案的建立需要特別的努力與時間,事實上,其整體效益還是相當值得的。「凡學過必留下痕跡」。隨著多元評量運動的興起與學校本位經營的趨勢下,在學校推展以檔案的方式作為評量方法,或建立全校性學生學習檔案的措施,相信對學生學習表現的評量將更為具體,師生互動也將更為密切直接。

這種具有合作參與,教學並重,過程結果兼具的學習與評量方式,相信將會是教育改革浪潮中的相當具體的實踐行動。


!相關網站
1. 大安高工 - 學習檔案製作網站 : http://210.70.131.3/efiles/main.asp?item=3
2. 北一女中社區化資訊網站 : http://203.64.52.1/~community/92/learnfile.html
本文網址 : http://www.ithome.com.tw/itadm/article.php?c=60737&s=1

雲端運算為當代資訊技術帶來一系列的創新與挑戰,有些是傳統運算平臺中的經典問題,有些則是雲端運算中的新問題、新技術

雲端運算的理念生動地體現出網路時代的資訊服務特性,一系列技術創新不斷被更新,以解決網路平台的服務生命週期管理問題,大規模分散式運算、儲存、通訊,以及資源隨需供應、按量計費問題。接下來我們將更深入討論雲端運算,著重在快速部署、資源調度、多租戶、巨量資料處理、大規模訊息傳輸、大規模分散式儲存、授權管理與計費等關鍵技術。

{{op1{快速部署}}}
自資料中心誕生以來,快速部署就是項重要的功能需求。資料中心管理員和用戶不斷追求更快、更高效、更靈活、功能更齊全的部署方案。雲端運算環境對快速部署的要求將會更高。首先,在雲端環境中的資源和應用不但規模變化範圍大且動態性高,用戶所需的服務主要採用「按需部署」的方式,即用戶隨時呼叫對資源和應用,雲端環境管理程式負責分配資源,部署服務;其次,不同層次雲端運算環境中服務的部署模式是不一樣的,比如虛擬化的基礎架構雲上的應用都被封裝在虛擬機裡,而多租戶平台上的應用則會選擇輕型、較小的封裝方案。另外,部署過程所支援的軟體系統形式很多樣,系統結構各不相同,部署工具要能適應被部署物件的變化。

串流的虛擬機部署方法可以有效減少單一虛擬機的部署時間。包含了作業系統、中介軟體、應用軟體的虛擬機映像,大小通常為幾個GB到幾十個GB,映像的複製速度會嚴重影響虛擬機的部署速度和用戶體驗;另外,虛擬機的啟動囊括整個軟體堆疊的組態和關聯,操作非常複雜,自動化程度的高低直接關係著虛擬機部署的效率。因此,即使採用了串流來部署,過程仍然會耗費大量時間。此外,在部署多個虛擬機時,串流式的虛擬機部署是順序的、串列的,若想進一步提高雲端環境中虛擬機的部署速度,則需要考慮平行部署或者協同部署技術。

平行部署是指將傳統的順序部署方式改變為平行執行,同時執行多個雲部署任務,將虛擬機同時部署到多個實體伺服器上。如圖4.1所示。

[img[img/cloudTech01.jpg]]

理想情況下,平行部署可以雙倍減少部署所需時間,但儲存映像檔所在部署伺服器的讀寫能力,或者部署系統有限的網路頻寬,卻也限制了實際的平行程度(即部署速度)。例如,在網路頻寬有限的情況下,同時運行多個部署任務時,這些任務會搶頻寬,當網路頻寬被佔滿時,部署速度就無法進一步提高。在這種情況下,協同部署技術可用來進一步提高部署速度。

協同部署技術的核心是將虛擬機映像在多個目標實體伺服器間的網路中傳輸,而非僅在部署伺服器和目標實體伺服器之間傳輸,進而提高部署速度。透過協同部署,網路頻寬就不再是限制部署速度的瓶頸,部署的速度上限取決於各目標實體伺服器間的網路頻寬總和。利用虛擬化技術和協同部署技術,我們可以建構一個協同部署系統,進而確保大規模資料中心服務的部署速度、效率和品質。如圖4.2所示,協同部署系統的架構包括了部署伺服器節點(圖中的雲部署伺服器)和被部署節點(圖中的實體主機A、B、C),關鍵模組包括部署控制器、映像拷貝器、協同部署器和協同控制器等。

[img[img/cloudTech02.jpg]]

部署伺服器負責將協同部署器及用戶空間檔案系統(透過I/O操作截獲技術,將用戶的本地檔存取重定向到網路上)的安裝檔發送到被部署節點,並發起部署任務;部署控制器負責協調各個節點間的部署進度,交換檔案資訊;被部署的節點在部署任務開始以後,根據啟動順序向用戶空間文件系統,呼叫虛擬映像資料集,用戶空間檔案系統呼叫協同部署器獲取文件區塊。協同部署技術能大幅提高部署速度。由於實體伺服器間存在大量共用頻寬,因此協同部署可能會影響其他實體伺服器的網路頻寬。

平行部署和協同部署技術同樣可以運用到實體解決方案的自動化部署過程中,加速部署過程。雲端環境中實體解決方案的部署,是指在實體平台上安裝軟體環境。首先,雲端的硬體環境搭建起來以後,需要在這些硬體上安裝雲端軟體環境,包括大規模的作業系統的部署、虛擬機運行平台的組態、雲基礎架構層管理軟體的安裝等。其次,在擴展雲平台架構的時候(例如為現有的資料中心加入新的實體伺服器),需要在新節點上面部署和組態作業系統、虛擬化平台、中介軟體等全套軟體。

與虛擬機的部署相比,實體解決方案自動化部署的難處在於軟體的多樣化和解決方案的複雜性。為了能夠自動化部署實體解決方案,需要定義一種標準格式來封裝解決方案,將軟體程式檔、安裝組態script、metadata等內容一起封裝;還需要一個通用的部署引擎,以及一組自動化安裝組態流程。透過這種方式,部署引擎在接收到解決方案的封裝文件以後,解析解決方案的metadata,按照自動化流程驅動整個解決方案的安裝組態過程。

{{op1{資源調度}}}
資源調度指的是,在特定的資源環境下,根據一定的資源使用規則,在不同使用者間進行資源調整的過程。這些資源使用者對應著不同的運算任務(例如虛擬化解決方案),每個運算任務在作業系統中對應一個或者多個流程。運算任務的資源調度通常有兩種途徑:在運算任務所在硬體上調整資源使用量,或者將運算任務轉移到其他機器上。圖4.3是將運算任務搬移到其他機器上的例子。在這個例子中,實體資源A(如一台實體伺服器)的使用率遠高於實體資源B,透過將運算任務一從實體資源A搬移到實體資源B,使資源的使用更加均衡合理,達到負載平衡的目的。

[img[img/cloudTech03.jpg]]

目前的技術已經可在幾秒內(暫時停機時間為毫秒級)將一個作業系統進程,從一台機器搬移到另一台機器。這種作業系統流程的動態搬移技術,能夠實現運算任務在不同機器間的搬移。虛擬機的出現,使得所有的運算任務都被封裝在一個虛擬機內部。由於虛擬機具有隔離特性,因此可以採用虛擬機的動態搬移方案,達到運算任務搬移的目的。

雲端運算的巨量規模為資源調度帶來了新挑戰。資源調度需要考慮資源的即時使用情況,這就要求對雲端運算環境的資源進行即時監控和管理。雲端運算環境中資源的種類多、規模大,使得即時監控和管理變得十分困難。此外,一個雲端運算環境可能有成千上萬的運算任務,這對調度演算法的複雜性、有效性提出了挑戰。對於基於虛擬化技術的雲基礎架構層,虛擬機的大小一般都在幾個GB 以上,大規模平行的虛擬機搬移作業,很可能會因網路頻寬等因素的限制,而變得非常緩慢。

從調度的粗粒度來看,虛擬機內部應用的調度是雲端運算用戶更加關心的。如何調度資源滿足虛擬機內部應用的「服務層級協議」(SLA),也是目前待解難題之一。以效能為例,應用資源調度系統需要監控應用的即時效能指標,例如吞吐量、回應時間等。利用這些效能指標,結合歷史紀錄及預測模型,分析出未來可能的效能值,並與用戶預先制訂的優化規則進行比對,再結論應用是否需要,以及如何進行資源調整。目前,大多數虛擬化管理方案只能透過在虛擬機級別上的調度技術結合特定調度策略,嘗試為虛擬機內部應用做資源調度,普遍缺乏精確性和效果。

{{op1{多租戶技術}}}
傳統的軟體運行和維護模式要求軟體被部署在用戶所購買或租用的數據中心裡,這些軟體大多服務於特定的個人用戶或者企業用戶。在雲端運算環境中,更多的軟體以「軟體即服務」的方式發布出去,通常會提供給成千上萬的企業用戶共用。和傳統的軟體運行和維護模式相比,雲端運算要求硬體資源和軟體資源能夠更好地共用,具有良好的可擴充性,任何一個企業用戶都能夠按照自己的需求客製化組態,而不影響其他用戶的使用。多租戶技術就是目前得以滿足上述需求的關鍵技術。

多租戶技術是一項雲端運算平台技術,使大量用戶共用同一堆疊的軟硬體資源,每個用戶按需使用資源對軟體服務進行客製化組態,且不影響其他用戶的使用。這裡,每個用戶被稱為一個租戶。如圖4.4所示。

[img[img/cloudTech04.jpg]]

目前普遍認為,採用多租戶技術的「軟體即服務」應用需要兩項基本特徵。首先,「軟體即服務」應用是基於Web,能夠服務大量租戶且容易擴充;其次,在前者基礎上要求「軟體即服務」平台提供附加的業務邏輯,使租戶能夠對「軟體即服務」平台進行擴展,進而滿足更大型企業的需求。目前,多租戶技術面臨的技術難點包括資料隔離、客製化組態、架構擴展和效能客製化。

資料隔離是指多個租戶在使用同一個系統時,租戶的業務資料是相互隔離儲存的,不會相互干擾;多租戶技術需要安全、高效能的資料隔離,確保租戶資料安全及多租戶平台的整體性能。多租戶的資料庫管理有三種基本方式:一是給每個租戶建立單獨資料庫,好處是用戶間資料充分隔離,缺點是資料庫管理的成本和耗用比較大;二是將多個租戶的資料保存在同一個資料庫中,採用不同的Schema,某種程度減少了資料庫的管理成本和耗用,但也相對削弱資料隔離的效果;三是將多個租戶的資料保存在一個資料庫中,採用相同的Schema,也就是說將資料保存在一個表格,或者一類具有相同Schema的表格中,透過租戶的標識碼欄位進行區別。儘管這樣的管理成本和耗用最低,但資料隔離的效果卻也最差,需要相當安全檢驗來保障租戶間的資料隔離。

客製化組態是指,「軟體即服務」應用能夠支持不同租戶對此應用的組態客製化,例如介面顯示風格的客製化等。客製化組態的基本要求是,一個租戶的客製化操作不會影響到其他租戶,這就需要多租戶系統能夠對同一個租戶的組態進行描述和儲存,並且能夠在租戶登錄時,根據該租戶的客製化組態為其呈現相應的應用。在傳統的企業應用運行模式中,每個企業用戶都擁有一個獨立的應用實例,可以非常容易儲存和載入任何客製化組態;但在多租戶場景下,成千上萬的租戶共用同一個應用實例。在現有的平台技術中,例如J2EE,對應用組態的更改,通常會對該平台中的所有用戶產生影響。因此,如何支援不同租戶對同一應用實例的獨立客製化組態,是多租戶技術面臨的基本挑戰。

架構擴展是指多租戶服務能夠提供靈活的、具備高可擴充性的基礎架構,進而確保在不同負載下多租戶平台的效能。典型的多租戶環境下,多租戶平台需能支援許多租戶的同時存取,因此平台可擴充性相當關鍵。一個簡單的方法是,在初始階段就為多租戶平台分配巨量資源,確保在負載達到峰值時,平台的效能不減。然而,很多時候負載並非都處於峰值,所以這個方法會造成龐大的運算資源和能源浪費,大幅提高供應商的營運成本。因而,多租戶平台應該具有靈活可擴充的基礎架構,能夠根據負載的變化隨需擴充。

效能客製化是多租戶技術面臨的另一個挑戰。對於同一個「軟體即服務」應用實例來說,不同的用戶對效能的要求也可能不同。例如,某些客戶希望透過支付更多的費用來獲取更好效能,而另一些客戶則本著「夠用即可」的原則。在傳統的軟體營運模式中,每個客戶擁有獨立的資源堆疊,只需要為付費較多的用戶組態出更高階的資源就可以了,因此相對而言,效能客製化更容易;然而,同一個「軟體即服務」應用有許多租戶共用資源,如何為不同租戶在這套共用資源上靈活地組態效能,是多租戶技術中需要的突破和挑戰。

IT人員經常會面臨選擇虛擬化技術還是多租戶技術的問題。多租戶與虛擬化的不同在於:虛擬化後的每個應用或服務單獨存在於一個虛擬機器裡,不同虛擬機之間實現了邏輯的隔離,一個虛擬機感知不到其他虛擬機器;多租戶環境中的多個應用運行在同一個邏輯環境下,需要透過其他手段,比如應用或服務本身的特殊設計,來確保用戶間的隔離。

多租戶技術也具有虛擬化技術的一部分好處,如可以簡化管理、提高伺服器使用率、節省開支等。從技術實現難度的角度來說,虛擬化已經比較成熟,並且得到大量廠商的支援,而多租戶技術還在發展階段,不同廠商對多租戶技術的定義和實作還有分歧。當然,多租戶技術有其存在的必然性及應用情境。大量用戶使用同一類型應用時,如果把每個用戶的應用都做成單獨的虛擬機,可能需要成千上萬台,佔用大量資源,且多半重複。不僅增加虛擬機的管理難度,消耗的效能也大大增加。在這種情況下,就得以凸顯多租戶技術相對經濟的策略價值。

{{op1{巨量資料處理}}}
做為以網路為運算平台的雲端運算,將會更廣泛囊括到巨量資料處理任務。巨量資料處理指的是對大規模資料的運算和分析,通常資料的規模可以達到TB甚至PB 級別。在網路時代,資料的統計和分析很多是巨量資料級別的,一個典型的例子就是搜尋引擎。由於資料量非常大,一台電腦不可能同時滿足效能和可靠性等要求。以往,對於巨量資料處理的研究通常是基於某種平行運算模型和計算機叢集系統的,平行運算模型可以支援高吞吐量的分散式批次處理運算任務和巨量資料,電腦叢集系統則在網路連接的機器叢集上,建立一個可擴展的可靠運算環境。

在網路時代,由於巨量資料處理操作頻繁,很多研究者投入支援巨量資料處理的程式撰寫模型方面的研究。例如,一九九九年誕生的River 程式撰寫模型,開發人員可以基於該程式撰寫模型進行開發和執行運算任務。River 程式撰寫模型的設計目的就是使得大規模電腦叢集的程式撰寫和運算更容易,並具備更高的運算效能。

River 程式撰寫模型有兩個核心設計特性:高效能的分散式佇列,以及儲存冗餘機制。因此,River 需要對磁片和網路資料傳輸做出很細膩的調度。當前,世界最流行的巨量資料處理的程式撰寫模型是由Google 的狄恩(Jeffrey Dean)等人所設計的 MapReduce,能將一個任務分成很多更小的子任務,這些子任務能調度空閒的處理節點,使得處理速度愈快的節點處理愈多的任務,避免處理速度慢的節點延長整個任務的完成時間。下面,我們將介紹MapReduce 框架的工作原理和設計原則,以便加深讀者對巨量資料處理系統的了解。

[img[img/cloudTech05.jpg]]

{{op1{大規模訊息通信}}}
雲端運算的核心理念就是資源和軟體功能都是以服務的形式進行發布的,不同服務間經常需要透過訊息通信進行協作。可靠、安全、高效能的通訊基礎架構對於雲端運算的成功至關重要。訊息通信可以分為同步通信和非同步通信。如圖4.6所示。

[img[img/cloudTech06.jpg]]

同步訊息通訊是直接呼叫伺服器端服務,等待服務結果返回後才繼續執行。在服務端,服務的運行環境需要保存與用戶端通信的訊息,處理完成時將結果返回給用戶端。這種同步訊息通訊機制可能影響用戶端系統的處理速度,以及服務端系統的可用性。首先,用戶端系統因為需要同步等待而無法繼續處理任務;其次,同步通訊機制長期佔用服務端系統資源,服務實例也因需要與遠端用戶端通信,任務處理完成前無法立即處理下一個任務;再者,同步訊息通訊會降低服務的可用性,因為在分散式環境中,用戶端所呼叫的服務實例可能因為種種因素而不可用,造成用戶端呼叫無法得到處理。因此,非同步訊息通訊對於雲端運算環境就顯得格外重要。

在非同步訊息通信中,用戶端和服務端並不直接通信。用戶端把呼叫以訊息形式放在呼叫訊息佇列裡,繼續處理其他業務邏輯;服務實例會從呼叫訊息佇列中獲取呼叫訊息,並將處理結果放入回應訊息佇列裡,再立即處理下一個呼叫。訊息通信管理軟體判斷訊息呼叫是否成功發給目標服務實例,藉此來判斷該實例是否可用,並且在目標服務實例不可用的情況下將訊息發給其他服務實例,進而為用戶端提供高可用的服務。

服務導向的理念使得非同步訊息通信對雲端運算更加重要。非同步訊息通訊機制可使雲端運算每個層次中的內部元件間和各個層次間解耦合,確保雲端運算服務的高可用性。非同步訊息通信機制對於服務的可擴充性也非常重要,訊息佇列管理軟體可以透過佇列中的訊息數量和訊息呼叫的服務類型,來預測每種服務的工作負載變化趨勢,並且透過該趨勢自動增減服務實例。

雲端運算也為分散式系統中的訊息通信帶來新挑戰。首先,訊息通信服務必須夠穩定,才能確保在應用程式需要使用訊息服務時,服務一定是可用的,並確保在傳輸過程中不會丟失訊息;其次,訊息通信服務必須可擴充,進而支持大規模節點同時以高效能執行的訊息讀寫。雲端運算的安全問題一直以來備受關注,因此訊息通訊服務還要確保訊息傳遞的安全,保障業務的安全性。此外,緊湊、高效的訊息內容模型也有助於提高處理效率,這在雲端運算這樣的大規模訊息通信處理環境中尤其明顯。目前,雲端運算環境中的大規模資料通信技術仍在發展階段,亞馬遜的SQS(Simple Queue Service)是當今業界著名的雲端運算大規模訊息通信產品。

{{op1{大規模分散式儲存}}}
分散式儲存的目標是利用多台伺服器的儲存資源,滿足單台伺服器所無法滿足的儲存需求。分散式儲存要求儲存資源能夠被抽象化展現和統一管理,確保資料讀寫操作的安全性、可靠性、效能等各種要求。

過去幾十年,隨著網路技術的發展,愈來愈多的網路應用具有儲存巨量資料的需求,例如搜尋引擎和影片網站,這些需求催生了一些優秀的大規模分散式儲存技術,例如分散式檔案系統。分散式檔案系統允許用戶存取本機系統一般存取遠端伺服器的檔案系統,用戶可以將自己的資料儲存在多個遠端伺服器上,分散式檔案系統基本上都有冗餘備份機制和容錯機制,確保資料讀寫的正確性;雲端環境的儲存服務基於分散式檔案系統,並根據雲儲存的特徵做了相應的組態和改進。以下將分別介紹幾種分散式檔案系統和雲儲存服務。

Frangipani是個可擴充性很好的高效能分散式檔案系統,該系統採用了兩層的服務體系架構:底層是一個分散式儲存服務,能夠自動管理高擴充、高可用的虛擬磁片;在這個分散式儲存服務上層運行的是分散式檔案系統。JetFile 是個基於P2P的廣播技術、支援在網路的異構環境中分享檔案的分散式檔系統。Ceph 是個高效能又可靠的分散式檔案系統,儘可能分開資料和資料管理,藉此獲得最大I/O效能。

GFS(Google File System)是Google 公司設計的可擴充分散式檔案系統。工程師在考慮了分散式檔案系統的設計準則的基礎上,又發現了以下幾個不同於傳統分散式檔案系統的需求:第一,PC伺服器極易發生故障,造成節點失效,故障的原因繁多,有機器本身的、網路的、管理員引起的及外部環境引起的,因此需要對整個系統中的節點進行監控,檢測出現的錯誤,開發相應的容錯和故障回復機制。第二,在雲端運算環境中,巨量的結構化資料會以非常大的檔案儲存,一般為GB等級,因此需改變檔案系統以中小檔案(KB或者MB量級)為準的設計準則,以適應超大檔案的存取。第三,系統中對檔案的寫入絕大多數是追加作業,也就是在檔案的末尾寫入資料(在檔案中寫入資料的情況其實很少發生),而且資料一旦被寫入,也通常是依順序讀取,不會被修改,因此在設計系統時把優化重點放在追加作業上,就可以大幅度提高系統的效能。第四,設計系統時要考慮開放的、標準化操作介面,並隱藏檔案系統下層的負載平衡、備援複製等細節,這樣才可以方便地被上層系統大量使用。因此,GFS 能夠有效支持大規模巨量資料處理應用程式。圖4.7展示了GFS的系統架構。

[img[img/cloudTech07.jpg]]

雲端運算的出現為分散式儲存帶來新需求、新挑戰。在雲端運算環境中,資料的儲存和操作都是以服務形式提供。資料類型形形色色,包括普通檔案、虛擬機映像檔這樣的二進位大型檔案、類似XML的格式化資料,甚至資料庫的關聯式資料等。雲端運算的分散式儲存服務設計必須考慮到各種不同資料類型的大規模儲存機制,以及資料操作的效能、可靠性、安全性和單純性。

目前,雲端運算環境下的大規模分散式儲存方向已經有了研究成果和應用。BigTable 是Google 設計用來儲存巨量結構化資料的分散式儲存系統,Google 使用該系統來將網頁儲存成分散式、多維的、組織化的圖形。Dynamo 是亞馬遜設計的一種基於鍵值對的分散式儲存系統,設計之初的主要考量,就是亞馬遜的大規模資料中心隨時都可能發生大大小小的元件失效,因此Dynamo 能夠提供非常高的可用性。亞馬遜的S3是個支援大規模存儲多媒體這樣的二進位檔的雲端運算儲存服務。亞馬遜的SimpleDB 是建立在S3和EC2之上、用來儲存結構化資料的雲端運算服務。

{{op1{授權管理與計費}}}
授權管理與計費是IT基礎架構的最終支付環節,牽涉服務提供商與客戶的切身利益。客戶透過購買授權或者支付費用獲得對軟硬體、服務的智慧財產權或使用權利,以及相應的售後服務支援;各個供應商獲得客戶支付的費用。因此,透過授權管理與計費,整個IT業才得以運轉。

僅從軟體的授權計費模型來看,傳統的軟體授權購買方式下,用戶需要估算自己需要使用的軟體的CPU數量、主機數量、用戶數量,然後根據軟體發售商提供的授權方法,得到需要購買的授權數量的最大值,做為最終購買的數量。舉例來說,用戶的資料中心有一百台機器需要使用一個軟體,每台機器有一個CPU,那麼用戶購買軟體時,需要購買一百個授權。但在實際使用時,可能只有幾台機器在使用這個軟體,而使用軟體的機器上的CPU佔用率也遠遠不足一○○%;也就是說,在傳統的軟體授權計費模型下,用戶購買的授權數量遠遠超過真實使用量,可說是白花了不少錢。

隨著雲端運算時代的到來,IT基礎架構的授權管理與計費模式將發生重大變化。在雲端運算的場景下,用戶可以按需付費或者按使用計費,少花冤枉錢。在隨需付費模式下,用戶可以估計自己對於軟體授權的使用情況,決定自己採購的授權數量。雲端運算環境會根據用戶的支付給用戶一定量的授權,並按照用戶在雲端運算環境中的使用情況,運算已使用的授權數量或發布授權。當剩餘的授權數量少於某個特定值時,系統會提醒用戶決定是否追加付費,或者減少他使用的授權數量。

在隨需計費的模式下,用戶甚至不需提前估計自己需要的授權數量,系統會自動追蹤用戶在雲端環境裏的使用情況,定期產出授權帳單。也就是說,未來用戶使用雲端運算環境中的資源,會像用水和用電一樣簡單方便。雖然雲端運算的新型計費模型設計得非常美好,但為了達到這個理想,還有很多工作要做,其中最迫切的問題就是,眾多軟硬體供應商目前還沒有制訂出對應在雲端運算環境的產品計費模式,成為這些產品進入雲端運算環境的障礙。(摘錄整理自第四章) 
''參考文章''
1. 改變世界的免費雲端軟體:OpenStack 幕後創造祕辛(上)
http://wired.tw/2012/04/11/openstack-1/index.html
2. 改變世界的免費雲端軟體:OpenStack幕後創造祕辛(下)
http://wired.tw/2012/04/12/openstack-2/index.html

{{item1{Open Stack}}}
官方網站 : http://www.openstack.org/

''OpenStack''
OpenStack is a global collaboration of developers and cloud computing technologists producing the ubiquitous open source cloud computing platform for public and private clouds. The project aims to deliver solutions for all types of clouds by being simple to implement, massively scalable, and feature rich. The technology consists of a series of interrelated projects delivering various components for a cloud infrastructure solution.

''Who's behind OpenStack?''
Founded by Rackspace Hosting and NASA, OpenStack has grown to be a global software community of developers collaborating on a standard and massively scalable open source cloud operating system. Our mission is to enable any organization to create and offer cloud computing services running on standard hardware.

''Who uses OpenStack? ''
Corporations, service providers, VARS, SMBs, researchers, and global data centers looking to deploy large-scale cloud deployments for private or public clouds leveraging the support and resulting technology of a global open source community.

''Why open matters: ''
All of the code for OpenStack is freely available under the Apache 2.0 license. Anyone can run it, build on it, or submit changes back to the project. We strongly believe that an open development model is the only way to foster badly-needed cloud standards, remove the fear of proprietary lock-in for cloud customers, and create a large ecosystem that spans cloud providers.

For more information, visit the OpenStack Community Q&A.

{{item1{Eucalyptus}}}
官方網站 : http://open.eucalyptus.com/learn

''Eucalyptus'' is software that implements scalable ''IaaS-style'' private and hybrid clouds. The Eucalyptus architecture is highly modular with internal components consisting of Web services, which make them easy to replace and expand. Eucalyptus' flexibility enables it to export a variety of APIs towards users via client tools. Currently Eucalyptus implements the Amazon Web Service (AWS) API, which allows interoperability with existing AWS-compatible services and tools. This also allows Eucalyptus users to group resources drawn both from an internal private cloud and external public clouds to form a hybrid cloud.

{{item1{OpenNebula}}}
官方網站 : http://www.opennebula.org/doku.php?id=start

Fully open source (not open core), thoroughly tested, customizable, extensible and with unique features and excellent performance and scalability to manage hundreds of thousands of VMs:
* Private cloud with Xen, KVM and VMware,
* Hybrid cloud (cloudbursting) with Amazon EC2, and other providers through Deltacloud (from ecosystem),
* Public cloud supporting EC2 Query, OGF OCCI and vCloud (from ecosystem) APIs
<<toBalaNotes "1">>
{{item1{OCCI 開放雲端運算介面}}}
OCCI 官方網址 : http://www.occi-wg.org/doku.php

開放網格論壇(Open Grid Forum, OGF)早已成立專責介面標準化的工作小組。其所制定的開放雲端運算介面標準(Open Cloud Computing Interface, OCCI),即為一個免費、開放、為社群共同接納推動,且以雲端基礎架構服務為鎖定目標的介面標準。藉由該API,資料中心與雲端夥伴可以免受現有一堆專利或開放雲端API之間歧異不相容之苦。

面對雲端基礎架構服務所組成之關鍵元件,目前OCCI是採用資源導向架構(Resourced Oriented Architecture, ROA)來表示。同時,每個由簡潔URI標示的資源可擁有許多不同的描述呈現方式(例如可以超文件來表示)。OCCI工作小組正規劃在API中加入許多格式的支援,在初始版本中,Atom/Pub、JSON及Plain Text等標準都被納入支援行列中。

該版本並且規定一個單獨URI進入點(Entry Point)定義一個OCCI介面,該介面顯示「Nouns」內含屬性,其中的「Verb」會被執行。原則上,該屬性會以鍵值對(Key-value pairs)表示,而適當的動詞則以連結(Link)表示。重要的是,該屬性會以URI來描述。

[img[img/OCCI01.gif]]

該 API 不僅提供 CRUD 操作,且分別與 HTTP Verb 的 POST、GET、PUT 及 Delete 等參數相對應。HEAD 與 OPTIONS 等 Verb 參數可用來檢索詮釋資料(Metadata)與有效操作,而不需要實體主體來增進效能。所有 HTTP 功能均能利用現有網際網路基礎架構,包括快取、代理、閘道及其他進階功能。再者,所有詮釋資料,包括資源間的關聯性會透過 HTTP 表頭對外公開。該介面原生地以 ATOM 表示,並盡可能地接近底層 HTTP 協定來執行。

OCCI 會提供對基礎架構服務之定義、創建、部署、操作及退出的管理功能。透過簡易服務生命週期模型,可支援由雲端供應商提供的基本通用生命週期狀態。在事件中,供應商並不會提供或報告服務生命週期狀況,OCCI 並不會強制遵行,而是將生命週期模型定義成提議書,供雲端供應商遵循。

參照 OCCI,雲端運算用戶端可啟動執行全新應用程式堆疊,並管理其生命週期與其採用的資源。為了執行像是來自 SNIA CDMI 介面所導出的應用程式堆疊,透過 OCCI 介面即可分派儲存至特定虛擬機器。SNIA 機構並表示,接下來該組織會進一步對儲存管理與其中資料管理之方法途徑進行檢驗。 

{{item1{管理介面標準 - REST}}}
當前 IaaS 方案多半提供了基於 REST(REpresentational State Transfer)式的 HTTP 操作介面,透過該介面,可允許在其基礎架構上進行虛擬映像檔的部署、管理,以及資源的指定分配。

REST 介面並沒有其他協定的額外負擔,它允許使用者可以簡易地存取其伺服器。每個資源皆透過獨一無二的 URI(Uniform Resource Identifier)定址,同時基於 CRUD(Create 創建、Retrieve 檢索、Update 更新、Delete 刪除)四個操作,資源因而能被控管。

<<toBalaNotes "3">>

///%3
//%/

///%1
//%/
官方網址 : http://code.google.com/intl/en/android/

{{item1{Android Architecture}}}
本文網址 : http://developer.android.com/guide/basics/what-is-android.html

The following diagram shows the major components of the Android operating system. Each section is described in more detail below.

[img[img/android-system-architecture.jpg]]

<<toBalaNotes "android">>


///%android
//%/
{{item1{設定語系}}}

[img[img/android/aop01.png]]

[img[img/android/aop02.png]]

[img[img/android/aop03.png]]

[img[img/android/aop04.png]]

{{item1{連接網站}}}

[img[img/android/abrow01.png]]

[img[img/android/abrow02.png]]

[img[img/android/abrow03.png]]
<<toBalaNotes "1">>


///%1
//%/
After having seen CouchDB’s raw API, let’s get our feet wet by playing with Futon, the built-in administration interface. Futon provides full access to all of CouchDB’s features and makes it easy to work with some of the more complex ideas involved. With Futon we can create and destroy databases; view and edit documents; compose and run MapReduce views; and trigger replication between databases.

To load Futon in your browser, visit:
{{{
http://127.0.0.1:5984/_utils/
}}}
If you’re running version 0.9 or later, you should see something similar to Figure 1, “The Futon welcome screen”. In later chapters, we’ll focus on using CouchDB from server-side languages such as Ruby and Python. As such, this chapter is a great opportunity to showcase an example of natively serving up a dynamic web application using nothing more than CouchDB’s integrated web server, something you may wish to do with your own applications.

The first thing we should do with a fresh installation of CouchDB is run the test suite to verify that everything is working properly. This assures us that any problems we may run into aren’t due to bothersome issues with our setup. By the same token, failures in the Futon test suite are a red flag, telling us to double-check our installation before attempting to use a potentially broken database server, saving us the confusion when nothing seems to be working quite like we expect! 

{{item1{設定對外連接 IP 位址}}}
{{op1{1. 修改設定檔}}}

請將 bind_address 預設的 127.0.0.1 改成連外的 IP 位址
{{{
# nano /etc/couchdb/default.ini
                 :
                 :
[httpd]
port = 5984
bind_address = 140.137.214.252
max_connections = 2048
authentication_handlers = {couch_httpd_oauth, oauth_authentication_handler}, {c$
default_handler = {couch_httpd_db, handle_request}
secure_rewrites = true
vhost_global_handlers = _utils, _uuids, _session, _oauth, _users
allow_jsonp = false
                     :
                     :
}}}

{{op1{2. 重新啟動 CouchDB}}}
{{{
# /etc/init.d/couchdb restart
}}}

{{op1{3. 連接 Futon 網站}}}

啟動瀏覽器, 輸入以下 URL :
{{{
http://140.137.214.252:5984/_utils/
}}}

連接成功, 如下圖 :

[img[img/couchdb/futon01.png]]

<<toBalaNotes "futon">>


///%futon
//%/
''參考文章''
1. Running Hadoop On Ubuntu Linux (Single-Node Cluster)
http://www.michael-noll.com/tutorials/running-hadoop-on-ubuntu-linux-single-node-cluster/
2. Distributed data processing with Hadoop, Part 1: Getting started
http://www.ibm.com/developerworks/linux/library/l-hadoop-1/
3. Single Node Setup
http://hadoop.apache.org/common/docs/current/single_node_setup.html
4. Hadoop space quotas, HDFS block size, replication and small files
http://www.michael-noll.com/blog/2011/03/28/hadoop-space-quotas-hdfs-block-size-replication-and-small-files/

@@font-size:14pt;
Hadoop can also be run on a single-node in a ''pseudo-distributed mode (單點分散架構)'' where each Hadoop daemon runs in a ''separate Java process(JVM)''.@@

{{item1{部署 Hadoop 實驗系統}}}

''1. 批次建立 Hadoop 虛擬電腦及網路''
{{{
$ cd ~/iLab

$ sudo ./labcmd.sh create -f Lab301
}}}

[img[img/Lab/Lab301.png]]

''2. 啟動 Lab301 實驗系統''
{{{
$ sudo ./labcmd.sh start Lab301
}}}

{{item1{HDP120 虛擬電腦 - 建立 ssh 自動登入連接}}}
執行 ssh localhost 命令, 登入不需輸入密碼, 請執行以下操作:

''1. 登入 HDP120''
{{{
$ sudo virsh console HDP120
Connected to domain HDP120
Escape character is ^]

root@HDP120:~# 
}}}

''2. 編輯 /etc/hosts 檔''
{{{
# 回到家目錄
$ cd                   

# 修改 /etc/hosts 設定檔
$ sudo nano /etc/hosts
127.0.0.1 localhost
192.168.100.20 HDP120
192.168.100.21 HDP121

# The following lines are desirable for IPv6 capable hosts
# (added automatically by netbase upgrade)

::1     ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
ff02::3 ip6-allhosts
}}}

''3. 產生自動登入憑證''
{{{
# cd
# ssh-keygen -t dsa -P '' -f ~/.ssh/id_dsa
# cat ~/.ssh/id_dsa.pub >> ~/.ssh/authorized_keys
}}}

''4. 第一次 ssh 自動登入操作 ''
{{{
# ssh HDP120
The authenticity of host 'hdp120 (192.168.100.20)' can't be established.
RSA key fingerprint is be:4d:f0:a2:a4:c7:aa:4f:ff:f3:29:39:b7:b8:c7:4f.
Are you sure you want to continue connecting (yes/no)? yes                         #  回答 yes, 下載 SSH Server 憑證
Warning: Permanently added 'hdp120,192.168.100.20' (RSA) to the list of known hosts.
Linux HDP120 2.6.32-33-generic-pae #72-Ubuntu SMP Fri Jul 29 22:06:29 UTC 2011 i686 GNU/Linux
Ubuntu 10.04.4 LTS

Welcome to Ubuntu!
 * Documentation:  https://help.ubuntu.com/
Last login: Sat Jun 30 23:12:49 2012
root@HDP120:~# 
}}}

''5. 結束連線''
{{{
root@HDP120:~#  exit
logout
Connection to HDP120 closed.
}}}

{{item1{HDP120 虛擬電腦 - 設定 Hadoop 單點分散架構}}}

''1. 建立檔案儲存目錄''
{{{
$ cd /mnt/hda1/hadoop-1.0.3/

$ mkdir data

}}}

One of the basic tasks involved in setting up a Hadoop cluster is determining where the several various Hadoop-related directories will be located. Where they go is up to you; in some cases, the default locations are inadvisable and should be changed. This section identifies these directories.

|Directory|Description|Default location|	Suggested location|
|HADOOP_LOG_DIR|Output location for log files from daemons|${HADOOP_HOME}/logs|/var/log/hadoop|
|@@color:red;hadoop.tmp.dir@@|A base for other temporary directories|/tmp/hadoop-${user.name}|/tmp/hadoop|
|dfs.name.dir|Where the NameNode metadata should be stored|${@@color:red;hadoop.tmp.dir@@}/dfs/name|/tmp/hadoop/dfs/name|
|dfs.data.dir|Where DataNodes store their blocks|${@@color:red;hadoop.tmp.dir@@}/dfs/data|/tmp/hadoop/dfs/data|
|mapred.system.dir|The in-HDFS path to shared MapReduce system files|${@@color:red;hadoop.tmp.dir@@}/mapred/system|/tmp/hadoop/mapred/system|

This table is not exhaustive; several other directories are listed in conf/hadoop-defaults.xml. The remaining directories, however, are initialized by default to reside under hadoop.tmp.dir, and are unlikely to be a concern.

It is critically important in a real cluster that dfs.name.dir and dfs.data.dir be moved out from hadoop.tmp.dir. A real cluster should never consider these directories temporary, as they are where all persistent HDFS data resides. Production clusters should have two paths listed for dfs.name.dir which are on two different physical file systems, to ensure that cluster metadata is preserved in the event of hardware failure.

A multi-user configuration should also definitely adjust mapred.system.dir. Hadoop's default installation is designed to work for standalone operation, which does not use HDFS. Thus it conflates HDFS and local file system paths. When enabling HDFS, however, MapReduce will store shared information about jobs in mapred.system.dir on the DFS. If this path includes the current username (as the default hadoop.tmp.dir does), this will prevent proper operation. The current username on the submitting node will be the username who actually submits the job, e.g., "alex." All other nodes will have the current username set to the username used to launch Hadoop itself (e.g., "hadoop"). If these do not match, the TaskTrackers will be unable to find the job information and run the MapReduce job.

For this reason, it is also advisable to remove ${user.name} from the general hadoop.tmp.dir.

While most of the directories listed above (all the ones with names in "foo.bar.baz" form) can be relocated via the conf/hadoop-site.xml file, the HADOOP_LOG_DIR directory is specified in conf/hadoop-env.sh as an environment variable. Relocating this directory requires editing this script. 

''2. 設定 conf/core-site.xml''
{{{
$ nano conf/core-site.xml 
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<!-- Put site-specific property overrides in this file. -->

<configuration>
     <property>
         <name>fs.default.name</name>
         <value>hdfs://HDP120:9000</value>
     </property>
     <property>
         <name>hadoop.tmp.dir</name>
         <value>/mnt/hda1/hadoop-1.0.3/data</value>
     </property>
</configuration>
}}}

''3. 設定 conf/hdfs-site.xml''
{{{
$ nano conf/hdfs-site.xml
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<!-- Put site-specific property overrides in this file. -->

<configuration>
     <property>
         <name>dfs.replication</name>
         <value>1</value>
     </property>
</configuration>
}}}

''[註]'' dfs.safemode.threshold.pct 的值為 0, NameNode 一啟動不會進入 safe mode (read only)
{{{
<property>
     <name>dfs.safemode.threshold.pct</name>
     <value>0</value>
</property>
}}}

''4. 設定 conf/mapred-site.xml''
{{{
$ nano conf/mapred-site.xml
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<!-- Put site-specific property overrides in this file. -->
<configuration>
     <property>
         <name>mapred.job.tracker</name>
         <value>HDP120:9001</value>
     </property>
</configuration>
}}}

''5. masters/slaves 設定檔''
masters 用來設定 SecondaryNameNode, 如沒有設定, SecondaryNameNode 這服務不會被啟動
{{{
$ nano conf/masters
HDP120
}}}

slaves 用來設定 DataNode
{{{
$ nano conf/slaves 
HDP120
}}}

{{item1{設定 Hadoop 環境變數 (conf/hadoop-env.sh)}}}
之所以要做這設定, 是因為 HDFS 系統是以 root 身份執行, 而 root 的環境變數並沒有設定 JAVA_HOME 及 HADOOP_HEAPSIZE, 所以請在 conf/hadoop-env.sh 檔案中, 設定 JAVA_HOME 及 HADOOP_HEAPSIZE 這二個環境變數。
{{{
$ nano conf/hadoop-env.sh 
# Set Hadoop-specific environment variables here.

# The only required environment variable is JAVA_HOME.  All others are
# optional.  When running a distributed configuration it is best to
# set JAVA_HOME in this file, so that it is correctly defined on
# remote nodes.

# The java implementation to use.  Required.
export JAVA_HOME=/mnt/hda1/jdk1.6.0_33

# Extra Java CLASSPATH elements.  Optional.
# export HADOOP_CLASSPATH=

# The maximum amount of heap to use, in MB. Default is 1000.
export HADOOP_HEAPSIZE=128 

# Extra Java runtime options.  Empty by default.
# export HADOOP_OPTS=-server
                       :
}}}

{{op1{Hadoop 0.20.203 系統修正}}}
修改 bin/hadoop 執行檔 (Bash Script 程式)

{{{
# nano bin/hadoop
                  :
                  :
  CLASS='org.apache.hadoop.hdfs.server.datanode.DataNode'
  if [[ $EUID -eq 0 ]]; then
    HADOOP_OPTS="$HADOOP_OPTS -jvm server $HADOOP_DATANODE_OPTS"
  else
    HADOOP_OPTS="$HADOOP_OPTS -server $HADOOP_DATANODE_OPTS"
  fi
                   :
                   :
}}}

將上面程式中的這一行 HADOOP_OPTS="$HADOOP_OPTS -jvm server $HADOOP_DATANODE_OPTS", 改成下式 :
{{{
HADOOP_OPTS="$HADOOP_OPTS -server $HADOOP_DATANODE_OPTS"
}}}

{{item1{格式化分散式檔案系統 : HDFS 1.0.3}}}

''# hadoop namenode -format''
{{{
12/06/29 15:16:05 INFO namenode.NameNode: STARTUP_MSG: 
/************************************************************
STARTUP_MSG: Starting NameNode
STARTUP_MSG:   host = HDP120/192.168.100.20
STARTUP_MSG:   args = [-format]
STARTUP_MSG:   version = 1.0.3
STARTUP_MSG:   build = https://svn.apache.org/repos/asf/hadoop/common/branches/branch-1.0 -r 1335192; compiled by 'hortonfo' on Tue May  8 20:31:25 UTC 2012
************************************************************/
12/06/29 15:16:06 INFO util.GSet: VM type       = 32-bit
12/06/29 15:16:06 INFO util.GSet: 2% max memory = 19.33375 MB
12/06/29 15:16:06 INFO util.GSet: capacity      = 2^22 = 4194304 entries
12/06/29 15:16:06 INFO util.GSet: recommended=4194304, actual=4194304
12/06/29 15:16:07 INFO namenode.FSNamesystem: fsOwner=root
12/06/29 15:16:07 INFO namenode.FSNamesystem: supergroup=supergroup
12/06/29 15:16:07 INFO namenode.FSNamesystem: isPermissionEnabled=true
12/06/29 15:16:07 INFO namenode.FSNamesystem: dfs.block.invalidate.limit=100
12/06/29 15:16:07 INFO namenode.FSNamesystem: isAccessTokenEnabled=false accessKeyUpdateInterval=0 min(s), accessTokenLifetime=0 min(s)
12/06/29 15:16:07 INFO namenode.NameNode: Caching file names occuring more than 10 times 
12/06/29 15:16:07 INFO common.Storage: Image file of size 110 saved in 0 seconds.
12/06/29 15:16:08 INFO common.Storage: Storage directory /mnt/hda1/hadoop-1.0.3/data/dfs/name has been successfully formatted.
12/06/29 15:16:08 INFO namenode.NameNode: SHUTDOWN_MSG: 
/************************************************************
SHUTDOWN_MSG: Shutting down NameNode at HDP120/192.168.100.20
************************************************************/
}}}

''問題 : 執行 "hadoop namenode -format" 命令, 為何告知 JAVA_HOME 沒有設定''
{{{
$ sudo $HADOOP_INSTALL/bin/hadoop namenode -format
Error: JAVA_HOME is not set.

Well that's funny, I set JAVA_HOME in my /etc/profiles.

user@linux01:~$ tail -n 4 /etc/profile
export JAVA_HOME=/usr/local/jdk1.6.0_32/bin
export JDK_HOME=$JAVA_HOME
export PATH=$PATH:/usr/local/jdk1.6.0_32/bin
export HADOOP_INSTALL=/usr/local/hadoop/hadoop-1.0.3

Did I mess that up somehow?

user@linux01:~$ echo $JAVA_HOME
/usr/local/jdk1.6.0_32/bin
user@linux01:~$ ls $JAVA_HOME
appletviewer  extcheck       jar        javac    and so forth...

Seems to work. Maybe it absolutely has to be set in my hadoop-env.sh?

# The java implementation to use.  Required.
export JAVA_HOME=$JAVA_HOME

Lazy, yeah, but I still get "JAVA_HOME is not set" with or without this comment. I'm running low on ideas. Anyone see anything I'm missing?
}}}

''回答 :''
{{{
Thank you @Chris Shain and @Chris White for your hints. I was running hadoop as su, and su doesn't automatically know about the environmental variables I set. I logged in as my hadoop user (I had chown'd the hadoop install directory to this user), and was able to format the hdfs.

Secondary problem: When I tried to start Hadoop, NameNode and JobTracker started successfully but DataNode, SecondaryNameNode, and TaskTracker failed to start. I dug in a little bit. NameNode and JobTracker are started via hadoop-daemon.sh, but DataNode, SecondaryNameNode, and TaskTracker are started by hadoop-daemon*s*.sh. The resolution here was to properly set JAVA_HOME in conf/hadoop-env.sh.
}}}

{{item1{HDFS 分散式檔案系統的目錄結構}}}
格式化完之後, 系統會在 /mnt/hda1/hadoop-1.0.3/data 目錄, 建立以下目錄內容 : 
{{{
# tree data
data
└── dfs
    └── name
        ├── current
        │        ├── edits
        │        ├── fsimage
        │        ├── fstime
        │        └── VERSION
        └── image
            └── fsimage

4 directories, 5 files
}}}

@@color:red;如 NameNode 的資料庫設定存放在 /tmp 目錄下, 系統如正常關機 (shutdown -h now), /tmp 目錄會被清空 (reboot 不會被清空), 這時如執行 hadoop 命令會出現以下錯誤訊息@@
{{{
# hadoop dfs -ls /
11/06/22 16:10:52 INFO ipc.Client: Retrying connect to server: HDP120/192.168.100.20:9000. Already tried 0 time(s).
11/06/22 16:10:53 INFO ipc.Client: Retrying connect to server: HDP120/192.168.100.20:9000. Already tried 1 time(s).
11/06/22 16:10:54 INFO ipc.Client: Retrying connect to server: HDP120/192.168.100.20:9000. Already tried 2 time(s).
11/06/22 16:10:55 INFO ipc.Client: Retrying connect to server: HDP120/192.168.100.20:9000. Already tried 3 time(s).
11/06/22 16:10:56 INFO ipc.Client: Retrying connect to server: HDP120/192.168.100.20:9000. Already tried 4 time(s).
}}}

{{item1{啟動 Hadoop}}}
{{{
# start-all.sh 
starting namenode, logging to /mnt/hda1/hadoop-1.0.3/libexec/../logs/hadoop-root-namenode-HDP120.out
HDP120: starting datanode, logging to /mnt/hda1/hadoop-1.0.3/libexec/../logs/hadoop-root-datanode-HDP120.out
HDP120: starting secondarynamenode, logging to /mnt/hda1/hadoop-1.0.3/libexec/../logs/hadoop-root-secondarynamenode-HDP120.out
starting jobtracker, logging to /mnt/hda1/hadoop-1.0.3/libexec/../logs/hadoop-root-jobtracker-HDP120.out
HDP120: starting tasktracker, logging to /mnt/hda1/hadoop-1.0.3/libexec/../logs/hadoop-root-tasktracker-HDP120.out
}}}

''[註]'' start-all.sh 會跟據 conf/masters 及 conf/slaves 這二個檔案的主機名稱, 使用 ssh 自動登入主機, 啟動所需的服務
''[註]'' ''NameNode'' and ''JobTracker'' are started via ''hadoop-daemon.sh'', but ''DataNode, SecondaryNameNode, and TaskTracker'' are started by ''hadoop-daemon*s*.sh''. 

{{item1{檢視啟動那些 Hadoop Daemon}}}
{{{
$ jps
2232 NameNode
2667 TaskTracker
2347 DataNode
2468 SecondaryNameNode
2774 Jps
2546 JobTracker
}}}

''每一個 Hadoop daemon 均獨立執行'' 
{{{
$ ps aux |  grep java
root      2232  3.8 10.0 558752 50940 ttyS0    Sl   15:25   0:03 /mnt/hda1/jdk1.6.0_33/bin/java -Dproc_namenode -Xmx384m -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote -Dhadoop.log.dir=/mnt/hda1/hadoop-1.0.3/libexec/../logs -Dhadoop.log.file=hadoop-root-namenode-HDP120.log -Dhadoop.home.dir=/mnt/hda1/hadoop-1.0.3/libexec/.. -Dhadoop.id.str=root -Dhadoop.root.logger=INFO,DRFA  
                                :
root      2347  7.5  8.3 553240 42580 ?        Sl   15:25   0:06 /mnt/hda1/jdk1.6.0_33/bin/java -Dproc_datanode -Xmx384m -server -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote -Dhadoop.log.dir=/mnt/hda1/hadoop-1.0.3/libexec/../logs -Dhadoop.log.file=hadoop-root-datanode-HDP120.log -Dhadoop.home.dir=/mnt/hda1/hadoop-1.0.3/libexec/.. -Dhadoop.id.str=root -Dhadoop.root.logger=INFO,DRFA -Dhadoop.security.logger=INFO,NullAppender -Djava.library.path=/mnt
                                :
root      2468  2.7  6.8 552328 34856 ?        Sl   15:25   0:02 /mnt/hda1/jdk1.6.0_33/bin/java -Dproc_secondarynamenode -Xmx384m -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote -Dhadoop.log.dir=/mnt/hda1/hadoop-1.0.3/libexec/../logs -Dhadoop.log.file=hadoop-root-secondarynamenode-HDP120.log -Dhadoop.home.dir=/mnt/hda1/hadoop-1.0.3/libexec/.. -Dhadoop.id.str=root -Dhadoop.root.logger=INFO,DRFA -Dhadoop.security.logger=INFO,NullAppender -Djava.library.path=/mnt/hda1/hadoop-1.0.3/libexec/../lib/native/Linux-i386-32 -Dhadoop.policy.file=hadoop-policy.xml -classpath /mnt/hda1/hadoop-1.0.3/libexec/../conf:/mnt/hda1
                                 :
root      2546  4.1  7.6 559256 38848 ttyS0    Sl   15:25   0:03 /mnt/hda1/jdk1.6.0_33/bin/java -Dproc_jobtracker -Xmx384m -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote -Dhadoop.log.dir=/mnt/hda1/hadoop-1.0.3/libexec/../logs -Dhadoop.log.file=hadoop-root-jobtracker-HDP120.log -Dhadoop.home.dir=/mnt/hda1/hadoop-1.0.3/libexec/.. -Dhadoop.id.str=root -Dhadoop.root.logger=INFO,DRFA -Dhadoop.security.logger=INFO,DRFAS -Djava.library.path=/mnt/hda1/hadoop-1.0.3/libexec/../lib/native/Linux-i386-32 -Dhadoop.policy.file=hadoop-policy.xml -classpath /mnt/hda1/hadoop-
                                 :
root      2667  3.9  7.3 557188 37356 ?        Sl   15:25   0:03 /mnt/hda1/jdk1.6.0_33/bin/java -Dproc_tasktracker -Xmx384m -Dhadoop.log.dir=/mnt/hda1/hadoop-1.0.3/libexec/../logs -Dhadoop.log.file=hadoop-root-tasktracker-HDP120.log -Dhadoop.home.dir=/mnt/hda1/hadoop-1.0.3/libexec/.. -Dhadoop.id.str=root -Dhadoop.root.logger=INFO,DRFA -Dhadoop.security.logger=INFO,NullAppender -Djava.library.path=/mnt/hda1/hadoop-1.0.3/libexec/../lib/native/Linux-i386-32 -Dhadoop.policy.file=hadoop-policy.xml -classpath /mnt/hda1/hadoop-1.0.3/libexec/../conf:/mnt/hda1/jdk1.6.0_33/lib/tools.jar:/mnt/hda1/hadoop-1.0.3/libexec/..:/mnt/hda1/hadoop-1.0.3/libexec/../hadoop-core-1.0.3.jar:/mnt/hda1/hadoop-1.0.3/libexec
}}}

''[重點]'' 以上每一個 JVM 程序, 設定 Heap 最大記憶空間為 384M (-Xmx384m)
{{item1{停止 Hadoop}}}
{{{
$  stop-all.sh
stopping jobtracker
HDP120: stopping tasktracker
stopping namenode
HDP120: stopping datanode
HDP120: stopping secondarynamenode
}}}

{{item1{啟動與停止 HDFS}}}
{{{
$ start-dfs.sh
starting namenode, logging to /mnt/hda1/hadoop-1.0.3/libexec/../logs/hadoop-root-namenode-HDP120.out
HDP120: starting datanode, logging to /mnt/hda1/hadoop-1.0.3/libexec/../logs/hadoop-root-datanode-HDP120.out
HDP120: starting secondarynamenode, logging to /mnt/hda1/hadoop-1.0.3/libexec/../logs/hadoop-root-secondarynamenode-HDP120.out

$ jps
3506 Jps
3192 NameNode
3429 SecondaryNameNode
3308 DataNode

$ stop-dfs.sh 
stopping namenode
HDP120: stopping datanode
HDP120: stopping secondarynamenode
}}}

<<toBalaNotes "2">>

///%2
//%/


''1. 取得 Linux 核心虛擬框架系統檔''
在家目錄中, 下載 KVM 魔法書套件檔
{{{
$ cd ~
$ wget http://tobala.net/download/kvmgrimoire.zip
--2012-12-15 14:49:51--  http://tobala.net/download/kvmgrimoire.zip
正在查找主機 tobala.net (tobala.net)... 69.89.27.215
正在連接 tobala.net (tobala.net)|69.89.27.215|:80... 連上了。
已送出 HTTP 要求,正在等候回應... 200 OK
長度: 25981275 (25M) [application/zip]
Saving to: `kvmgrimoire.zip.1'

100%[======================================>] 25,981,275   198K/s   in 2m 44s

2012-12-15 14:52:38 (155 KB/s) - `kvmgrimoire.zip.1' saved [25981275/25981275]
}}}

''2. 安裝 Linux 核心虛擬框架系統''
將 kvmgrimoire.zip 解壓縮至家目錄中, 命令如下 :
{{{
$ cd ~
$ unzip kvmgrimoire.zip
}}}

''3. 檢查與安裝 Linux 核心虛擬框架系統所需之執行套件''
{{{
$ cd ~/kvmgrimoire

$ sudo ./lib/kvmcheck.sh
sudo apt-get install uml-utilities libxml2-utils libxml-xpath-perl
}}}
請根據上述程式的輸出資訊, 安裝所需套件, 命令如下 :
{{{
$ sudo apt-get install uml-utilities libxml2-utils libxml-xpath-perl
}}}

''4. 產生與指定 XML 系統架構檔''
首先在 ~/kvmgrimoire/conf 目錄中, 產生 mynet.xml 系統架構檔, 命令如下 :
{{{
$ cd ~/kvmgrimoire
}}}
mynet.xml 系統架構檔建立後, 在 conf/default 指定使用 mynet.xml, 操作命令如下 :
{{{
$ touch conf/mynet.xml

$ echo "conf/mynet.xml" > conf/default
}}}

''5. 建立所需的虛擬網路''
編輯 mynet.xml 系統架構檔, 命令如下 :
''$ nano conf/mynet.xml''
{{{
<?xml version="1.0"?>
<network>

  <!-- 透過 name 屬性, 指定 Switch HUB 的名稱 -->
  <switch-hub name="SH100">
     <!-- 設定 Bare-Metal 主機可直接與此網段其他虛擬主機連接的 IP 位址 (可以不設定) -->
     <ip>172.16.100.1/24</ip>
     <ports>
       <!-- 此 Port 作為 Switch-Hub 裝置本身使用 -->
       <tap name="SH100-NET" mac="02:01:00:00:00:00"/>

       <!-- 這些 Port 提供給虛擬主機使用 -->
       <tap name="SH100P1" mac="02:01:00:00:00:01"/>
       <tap name="SH100P2" mac="02:01:00:00:00:02"/>

       <!-- 這些 Port 提供給路由主機使用 -->
       <tap name="SH100R1" mac="02:01:00:00:00:f0"/>
       <tap name="SH100R2" mac="02:01:00:00:00:f1"/>
     </ports>
  </switch-hub>
</network>
}}}
XML 系統架構檔一旦設定完畢, 可執行 kvmaddsw.sh 程式, 實作所需的虛擬網路 (SH100)
{{{
$ sudo ./kvmaddsw.sh SH100
SH100SH100 建立成功
  SH100-NET 建立成功
  SH100P1 建立成功
  SH100P2 建立成功
  SH100R1 建立成功
  SH100R2 建立成功
}}}
如要刪除所建立的虛擬網路 (SH100), 執行以下程式
{{{
$ sudo ./kvmdelsw.sh SH100
SH100 刪除成功
  SH100-NET 刪除成功
  SH100P1 刪除成功
  SH100P2 刪除成功
  SH100R1 刪除成功
  SH100R2 刪除成功
}}}

''6. 規劃與建置所需的虛擬硬碟檔''
因 NAT 虛擬主機所面對的網路架構可能是多網段,所以它會需要虛擬硬碟, 儲存路由表資訊. 建立虛擬硬碟檔, 請在 conf/mynet.xml 加入以下定義,  命令如下 :
''$ nano conf/mynet.xml''
{{{
<network>
                 ::
  <vm-disk>
    <disk type="raw" name="vmdisk/NAT100.img" size="20m"/>
  </vm-disk>

</network>
}}}
根據 mynet.xml 定義檔產生所需的虛擬硬碟檔, 執行以下程式
{{{
$ sudo ./kvmdisk.sh 
vmdisk/NAT100.img : 建立成功 (raw 型態)
vmdisk/NAT100.img : 掛載成功 (/dev/loop5)
vmdisk/NAT100.img : 硬碟分割成功
vmdisk/NAT100.img : 格式化 Ext4 檔案系統 成功
vmdisk/NAT100.img : 卸載成功 (/dev/loop5)
}}}

''7. 規劃與建置所需的 NAT 虛擬主機''

增加實作主機 br0 網路介面, 編輯 /etc/network/interfaces 檔案, 命令如下 :
{{{
$ sudo nano /etc/network/interfaces
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).

# The loopback network interface
auto lo
iface lo inet loopback

# The primary network interface
auto eth0
iface eth0 inet manual

auto br0
iface br0 inet dhcp
  bridge_ports eth0
  bridge_stp off
}}}

''[注意]'' /etc/network/interfaces 檔案修改後, 一定要重新開機

設定 NAT 虛擬主機, 請在 conf/mynet.xml 加入以下定義, 命令如下 :
''$ nano conf/mynet.xml''
{{{
<network>
              :
              :
  <router name="RBR0.100">
     <vm name="GW100" osname="TSCKERNEL" ostype="32" mem="128">
       <uplink sw="br0" port="NAT100" mac="02:01:72:16:10:22">
         <!-- 設定此網路介面使用 DHCP 取得 TCP/IP 設定 -->
         <ipv4>dhcp</ipv4>
       </uplink>
       <nextlink port="SH100R2" mac="02:02:72:16:10:ff">
         <ipv4>172.16.100.254:255.255.255.0::</ipv4>
       </nextlink>
       <!-- 啟動 NAT 功能 -->
       <nat>true</nat>
       <disk name="vmdisk/NAT100.img"/>
     </vm>
  </router>
</network>
}}}
根據 mynet.xml 定義檔啟動所需的 NAT 虛擬主機, 執行以下程式
{{{
$ sudo ./kvmrouter.sh RBR0.100
}}}

''[注意]'' 執行上述命令, 請確認是否已建立 SH100 虛擬網路, 如沒有請執行  ''sudo ./kvmaddsw.sh SH100''

NAT 虛擬主機啟動後, 需在系統中自行下載 iptables.tcz 套件, 命令如下 :
''$ tce-load -wi  iptables.tcz''   
{{{                                    
Downloading: netfilter-3.0.21-tinycore.tcz                                     
Connecting to distro.ibiblio.org (152.19.134.43:80)                            
netfilter-3.0.21-tin 100% |*******************************|   316k  0:00:00 ETA
netfilter-3.0.21-tinycore.tcz: OK                                              
Downloading: iptables.tcz                                                      
Connecting to distro.ibiblio.org (152.19.134.43:80)                            
iptables.tcz         100% |*******************************|   404k  0:00:00 ETA
iptables.tcz: OK  
}}}
重新啟動 NAT 虛擬主機, 命令如下 :
{{{
$ sudo reboot

$ sudo iptables -t nat -L -n                                         
Chain PREROUTING (policy ACCEPT)                                               
target     prot opt source               destination                           
                                                                                 
Chain INPUT (policy ACCEPT)                                                    
target     prot opt source               destination                           
                                                                                  
Chain OUTPUT (policy ACCEPT)                                                   
target     prot opt source               destination                           
                                                                                  
Chain POSTROUTING (policy ACCEPT)                                              
target     prot opt source               destination                           
MASQUERADE  all  --  0.0.0.0/0            0.0.0.0/0 
}}}

''8. 建立虛擬電腦''

請在 conf/mynet.xml 加入以下定義, 命令如下 :
''$ nano conf/mynet.xml''
{{{
<network>
                 :
                 :
  <client name="SH100" osname="TSCKERNEL" ostype="32">
     <vm name="HDNN10" tap="SH100P1" mac="02:01:72:16:10:10" mem="128">
       <!-- 設定虛擬主機的 IP, Default Gateway 及 DNS Server -->
       <ipv4>172.16.100.10:255.255.255.0:172.16.100.254:168.95.1.1</ipv4>

       <!-- 設定虛擬主機使用 root 帳號登入 -->
       <superuser>true</superuser>
     </vm>
     <vm name="HDSN11" tap="SH100P2" mac="02:01:72:16:10:11" mem="128">
       <ipv4>172.16.100.11:255.255.255.0:172.16.100.254:168.95.1.1</ipv4>
     </vm>
  </client>

</network>
}}}
根據 mynet.xml 定義檔產生所需的虛擬主機, 執行以下命令
{{{
$ sudo ./kvmclient.sh SH100
}}}
啟動的虛擬主機會自動登入, 登入後畫面如下 :
{{{
Tiny Server Core 1.0 (Philosopher's Stone)                                     
---------------------------------------------------------------                                   
[eth0]                                                                         
  IP : 172.16.100.11                                                           
  Gateway : 172.16.100.254                                                     
  DNS : 168.95.1.1
}}}
檢測內網是否正常運作, 命令如下 :
{{{
$ ping 172.16.100.254    
}}}
檢測外網是否正常運作, 命令如下 :
{{{
$ ping 168.95.1.1
}}}
檢測 DNS 是否正常運作, 命令如下 :
{{{
$ ping www.hinet.net
}}}
<<toBalaNotes "1">>



///%1
//%/
''參考文章''
1. Network Block Devices: Using Hardware Over a Network (重要)
http://www.linux-mag.com/id/7118/
2. 直接操作 KVM 虛擬硬碟檔 (撰寫程式) 
http://linuxkvm.blogspot.com/2011/09/kvm.html
3. Ubuntu下透明壓縮的檔案系統- btrfs
http://download.ithome.com.tw/article/index/id/2311?tag=rss.qu

{{item1{Network block devices (NBD)}}}
Network block devices are used to access a remote storage device that does not physically reside in the local machine. Using a Network Block Device, we can access and use the remote storage devices in the following three ways on the local machine:
{{{
1. SWAP
2. File System
3. RAW
}}}
NBD presents a remote resource as local resource to the client. Also, NBD driver makes a remote resource look like a local device in Linux, allowing a cheap and safe real-time mirror to be constructed. You can also use remote machine storage area as local machine swap area using NBD. To setup the NBD based file system, we need a ''nbd-server'' (on remote machine, where we like to access/create the content) and ''nbd-client'' (on local machine, where we like to access the remote storage device locally).

@@color:blue;''[重要]'' NBD 可以處理隨需擴增的虛擬硬碟檔 (*.vmdk,*.qcow)@@

[img[img/createvmdisk.png]]

{{item1{啟動 NBD 核心模組}}}

''1. 檢視 nbd 核心模組 (nbd.ko)''
{{{
$ sudo find / -name nbd.ko
/lib/modules/3.2.0-25-generic/kernel/drivers/block/nbd.ko
/lib/modules/3.2.0-26-generic/kernel/drivers/block/nbd.ko
}}}

''2. 確定 nbd 核心模組是否被載入 ?''
在 Ubuntu 12.04 系統存在 nbd 核心模組 (nbd.ko), 但內定沒被自動載入.
{{{
$ lsmod | grep nbd
}}}

''3. 載入 nbd 核心模組''
{{{
$ sudo modprobe nbd max_part=2

# 檢視 nbd 模組
$ lsmod | grep nbd
nbd                    17744  0 

# 共有 16 個 nbd 裝置可使用
$ ll /dev/nbd*
brw-rw---- 1 root disk 43,  0  7月 11 14:22 /dev/nbd0
brw-rw---- 1 root disk 43,  4  7月 11 14:22 /dev/nbd1
brw-rw---- 1 root disk 43, 40  7月 11 14:22 /dev/nbd10
brw-rw---- 1 root disk 43, 44  7月 11 14:22 /dev/nbd11
brw-rw---- 1 root disk 43, 48  7月 11 14:22 /dev/nbd12
brw-rw---- 1 root disk 43, 52  7月 11 14:22 /dev/nbd13
brw-rw---- 1 root disk 43, 56  7月 11 14:22 /dev/nbd14
brw-rw---- 1 root disk 43, 60  7月 11 14:22 /dev/nbd15
brw-rw---- 1 root disk 43,  8  7月 11 14:22 /dev/nbd2
brw-rw---- 1 root disk 43, 12  7月 11 14:22 /dev/nbd3
brw-rw---- 1 root disk 43, 16  7月 11 14:22 /dev/nbd4
brw-rw---- 1 root disk 43, 20  7月 11 14:22 /dev/nbd5
brw-rw---- 1 root disk 43, 24  7月 11 14:22 /dev/nbd6
brw-rw---- 1 root disk 43, 28  7月 11 14:22 /dev/nbd7
brw-rw---- 1 root disk 43, 32  7月 11 14:22 /dev/nbd8
brw-rw---- 1 root disk 43, 36  7月 11 14:22 /dev/nbd9
}}}

{{item1{直接掛載虛擬硬碟檔}}}
你可以在沒啟動虛擬電腦的狀態下, 直接掛載虛擬硬碟檔 (*.qcow2, *.vmdk,...), 進行系統修改, 操作步驟如下 :

1. 將 VMware 虛擬硬碟檔 (*.vmdk) 掛載至 /dev/nbd0 區塊裝置, 命令如下 : 
{{{
$ sudo kvm-nbd --connect=/dev/nbd0 ~/NS88.vmdk 
}}}

@@color:blue;''[注意] NS88.vmdk 要給完整目錄, 上述命令只能執行一次, 如重複執行, 一定會造成 /dev/nbd0 無法使用, 可執行以下命令, 將無法使用的連接移除

$ sudo killall -9 kvm-nbd
''@@
''[註]''  lshw  -class disk  -class storage

2. 檢視虛擬硬碟檔內部資訊 (檔案系統, 分割區)
{{{
$ sudo fdisk /dev/nbd0

WARNING: DOS-compatible mode is deprecated. It's strongly recommended to
         switch off the mode (command 'c') and change display units to
         sectors (command 'u').

Command (m for help): p

Disk /dev/nbd0: 4294 MB, 4294967296 bytes
255 heads, 63 sectors/track, 522 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk identifier: 0x00000000

所用裝置 Boot      Start         End      Blocks   Id  System
/dev/nbd0p1   *           1         522     4192933+  83  Linux
}}}

3. 將 /dev/nbd0p1 裝置掛載至指定目錄
由前一步驟的資訊中,  可以得知存在 /dev/nbd0p1 這裝置, 其內容為 Linux 檔案系統, 掛載命令如下 :
{{{
$ sudo mkdir /tmp/mydisk
$ sudo mount /dev/nbd0p1 /tmp/mydisk
}}}

4. 檢視虛擬硬碟檔內部的檔案系統
{{{
$ ll /tmp/mydisk
總計 32
drwxr-xr-x  5 root root   4096 2011-08-20 17:33 ./
drwxr-xr-x 23 root root   4096 2011-07-16 19:18 ../
drwxr-xr-x  3 root root   4096 2011-08-19 12:03 boot/
drwx------  2 root root  16384 2011-08-19 12:03 lost+found/
drwxrwxr-x  3 xman staff  4096 2011-08-19 14:04 tce/
}}}

5. 卸載 /dev/nbd0 裝置
{{{
$ sudo umount /tmp/mydisk
$ sudo kvm-nbd -d /dev/nbd0
/dev/nbd0 disconnected 
}}}

<<toBalaNotes "1">>
{{item1{遠端 NBD Server 主機}}}

1. 產生 NBD.img 硬碟檔
{{{
$ kvm-img create -f qcow2 NBD.img 4G
}}}

2. 啟動 NBD Server
{{{
$ sudo kvm-nbd -p 5566 NBD.img &
[1] 6030
}}}
@@color:blue;''
[註] kvm-nbd 內定 port 為 10809, 上述命令只允許一個連接 (自己有連接, 別人便無法連接)
''@@
{{item1{NBD Client 端主機}}}

''1. 安裝 nbd-client 套件''
{{{
$ nbd-client
程式 'nbd-client' 目前尚未安裝。  您可以由輸入以下內容安裝:
sudo apt-get install nbd-client

$ sudo apt-get install nbd-client
正在讀取套件清單... 完成
正在重建相依關係          
正在讀取狀態資料... 完成
下列【新】套件將會被安裝:
  nbd-client
升級 0 個,新安裝 1 個,移除 0 個,有 0 個未被升級。
需要下載 39.1 kB 的套件檔。
此操作完成之後,會多佔用 180 kB 的磁碟空間。
下載:1 http://ftp.twaren.net/Linux/Ubuntu/ubuntu/ precise/main nbd-client amd64 1:2.9.25-2ubuntu1 [39.1 kB]
取得 39.1 kB 用了 2秒 (18.2 kB/s)
正在預先設定套件 ...
Selecting previously unselected package nbd-client.
(正在讀取資料庫 ... 293513 files and directories currently installed.)
正在解開 nbd-client (從 .../nbd-client_1%3a2.9.25-2ubuntu1_amd64.deb)...
正在進行 ureadahead 的觸發程式 ...
ureadahead will be reprofiled on next reboot
正在進行 man-db 的觸發程式 ...
正在設定 nbd-client (1:2.9.25-2ubuntu1) ...
Stopping NBD client process: 
nbd-client.
Starting NBD client process: Connecting...Activating...
nbd-client.
update-initramfs: deferring update (trigger activated)
正在進行 initramfs-tools 的觸發程式 ...
update-initramfs: Generating /boot/initrd.img-3.2.0-26-generic
}}}

在上面安裝過程, 會出現以下選項, 請選擇 "否"

[img[img/nbdclient01.png]]

''2. 連接遠端 NBD 裝置''
{{{
$ sudo nbd-client localhost 5566 /dev/nbd4
Negotiation: ..size = 4096MB
bs=1024, sz=4294967296 bytes
}}}
''[註]'' 如虛擬硬碟檔已格式化, 連接成功後, 會自動產生 /dev/nbd4p1 裝置

''3. 分割 NBD 裝置''
將已存在的分割區刪除
{{{
$ sudo fdisk /dev/nbd4

WARNING: DOS-compatible mode is deprecated. It's strongly recommended to
         switch off the mode (command 'c') and change display units to
         sectors (command 'u').

Command (m for help): p

Disk /dev/nbd4: 2147 MB, 2147483648 bytes
16 heads, 63 sectors/track, 4161 cylinders
Units = cylinders of 1008 * 512 = 516096 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk identifier: 0x00000000

所用裝置 Boot      Start         End      Blocks   Id  System
/dev/nbd4p1               1         261      131512+  83  Linux

Command (m for help): d
Selected partition 1

Command (m for help): w
The partition table has been altered!

Calling ioctl() to re-read partition table.
Syncing disks.
}}}

建立新的分割區
{{{
$ sudo fdisk /dev/nbd4

WARNING: DOS-compatible mode is deprecated. It's strongly recommended to
         switch off the mode (command 'c') and change display units to
         sectors (command 'u').

Command (m for help): p

Disk /dev/nbd4: 2147 MB, 2147483648 bytes
255 heads, 63 sectors/track, 261 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk identifier: 0x00000000

所用裝置 Boot      Start         End      Blocks   Id  System

Command (m for help): n
Command action
   e   extended
   p   primary partition (1-4)
p
Partition number (1-4): 1
First cylinder (1-261, default 1): 
Using default value 1
Last cylinder, +cylinders or +size{K,M,G} (1-261, default 261): +128M

Command (m for help): w
The partition table has been altered!

Calling ioctl() to re-read partition table.
Syncing disks.
}}}

''4. 格式化 NBD 裝置''
{{{
$ sudo mkfs -t ext4 /dev/nbd4p1 
mke2fs 1.42 (29-Nov-2011)
Filesystem label=
OS type: Linux
Block size=4096 (log=2)
Fragment size=4096 (log=2)
Stride=0 blocks, Stripe width=0 blocks
262144 inodes, 1048320 blocks
52416 blocks (5.00%) reserved for the super user
First data block=0
Maximum filesystem blocks=1073741824
32 block groups
32768 blocks per group, 32768 fragments per group
8192 inodes per group
Superblock backups stored on blocks: 
	32768, 98304, 163840, 229376, 294912, 819200, 884736

Allocating group tables: done                            
Writing inode tables: done                            
Creating journal (16384 blocks): done
Writing superblocks and filesystem accounting information: done 
}}}

5. 卸載 NBD 裝置
{{{
$ sudo nbd-client -d /dev/nbd4p1
Disconnecting: que, disconnect, nbd.c:nbd_trip():L613: From: 18446744073709551104, Len: 0, Size: 4294967296, Offset: 0

nbd.c:nbd_trip():L614: requested operation past EOF--bad client?
sock, done
}}}

<<toBalaNotes "2">>

///%1
//%/

///%2
//%/

///%3
//%/
''參考文章''
1. Forwarding ports to guests in libvirt / KVM (重要)
http://serverfault.com/questions/170079/forwarding-ports-to-guests-in-libvirt-kvm

{{item1{NX Free Edition : 由 NoMachine 公司釋出, 最多二個連接數}}}
下載網址 : http://www.nomachine.com/download.php
安裝文章 : http://www.humans-enabled.com/2012/04/how-to-install-freenx-server-on-ubuntu.html

NX Free Server delivers the X Window session to clients via the encrypted SSH (Secure Shell) protocol, and it does it much faster and snappier than my experience with VNC. The only drawback of NX Free Edition is the license, as it is proprietary.

Although "NX Free" edition is said to be "free forever". Looking at the license file in the .deb package, it appears there are a number of GPL-covered items there, and then some items with the proprietary license. 

{{item1{FreeNX : 沒有限制連接數, 從 2008/08/18 之後沒有釋出新版}}}
官方網址 : http://freenx.berlios.de/
安裝文章 : http://rbgeek.wordpress.com/2012/05/16/installing-the-freenx-server-on-ubuntu-12-04-lts/

NX is an exciting new technology for remote display. It provides near local speed application responsiveness over high latency, low bandwidth links. The core libraries for NX are provided by NoMachine under the GPL. FreeNX is a GPL implementation of the NX Server and NX Client Components. 

{{item1{Neatx : 效能優於 FreeNX, 使用 Python 及 c 開發}}}
Neatx 官方網站 : http://code.google.com/p/neatx/

本文網址 : https://help.ubuntu.com/community/FreeNX
Neatx is an Open Source NX server, similar to the commercial NX server from NoMachine. 

''History''
Neatx was developed by Google for an internal project. That project is now finished, and the source was released for the community to use/develop/benefit from. A couple of Google employees are doing sporadic releases and maintenance in their spare time.

''What Works''
{{{
* Session creation
* Session suspension
* Session resumption
* Session shutdown
* Gnome/KDE/Application/Console sessions
* Floating Window/Virtual Desktop sessions
* Fullscreen/Resolution/Keyboard preferences
* Session shadowing (though only sessions belonging to you) 
}}}

<<toBalaNotes "neatx">>
{{item1{安裝 Neatx Server}}}
neatx-server 套件由 "nathan-renniewaldock ppa" 提供, 而有些相依套件則由 "freenx-team" 提供

''1. 增加套件庫''
{{{
$ sudo add-apt-repository ppa:nathan-renniewaldock/ppa && sudo add-apt-repository ppa:freenx-team
[sudo] password for student:
You are about to add the following PPA to your system:
 Various stable software updates.
 More info: https://launchpad.net/~nathan-renniewaldock/+archive/ppa
Press [ENTER] to continue or ctrl-c to cancel adding it

Executing: gpg --ignore-time-conflict --no-options --no-default-keyring --secret-keyring /tmp/tmp.K9II05Vx9L --trustdb-name /etc/apt/trustdb.gpg --keyring /etc/apt/trusted.gpg --primary-keyring /etc/apt/trusted.gpg --keyserver hkp://keyserver.ubuntu.com:80/ --recv 428926204FE30238F00B98224CDB129629A4B41A
gpg: 正在請求金鑰 29A4B41A 自 hkp 伺服器 keyserver.ubuntu.com
gpg: 金鑰 29A4B41A: 公鑰 "Launchpad PPA for Nathan Rennie-Waldock" 已匯入
gpg: 處理總量: 1
gpg:               已匯入: 1  (RSA: 1)
You are about to add the following PPA to your system:
 FreeNX/NX packages

 More info: https://launchpad.net/~freenx-team/+archive/ppa
Press [ENTER] to continue or ctrl-c to cancel adding it

Executing: gpg --ignore-time-conflict --no-options --no-default-keyring --secret-keyring /tmp/tmp.GIl7pT3L9D --trustdb-name /etc/apt/trustdb.gpg --keyring /etc/apt/trusted.gpg --primary-keyring /etc/apt/trusted.gpg --keyserver hkp://keyserver.ubuntu.com:80/ --recv F3A662B57D580D3A2E98E5152A8E3034D018A4CE
gpg: 正在請求金鑰 D018A4CE 自 hkp 伺服器 keyserver.ubuntu.com
gpg: 金鑰 D018A4CE: 公鑰 "Launchpad PPA for FreeNX Team" 已匯入
gpg: 處理總量: 1
gpg:               已匯入: 1  (RSA: 1)
}}}

''2. 更新套件清單''
{{{
$ sudo apt-get update
}}}

At this point, the repository is added and apt is updated, then install the neatx-server package.

''3. 安裝 NeatX Server''
{{{
$ sudo apt-get install neatx-server
student@kvm12041:~$ sudo apt-get install neatx-server
正在讀取套件清單... 完成
正在重建相依關係
正在讀取狀態資料... 完成
下列的額外套件將被安裝:
  libnx-xorg libxcomp3 libxcompext3 libxcompshad3 nx-common nxagent
  python-neatx
下列【新】套件將會被安裝:
  libnx-xorg libxcomp3 libxcompext3 libxcompshad3 neatx-server nx-common
  nxagent python-neatx
升級 0 個,新安裝 8 個,移除 0 個,有 7 個未被升級。
需要下載 3,222 kB 的套件檔。
此操作完成之後,會多佔用 8,258 kB 的磁碟空間。
是否繼續進行 [Y/n]?
}}}

{{item1{安裝 NX Client for Windows}}}

''1. 下載 NX Client for Windows''
下載網址 : http://www.nomachine.com/select-package-client.php

''2. 安裝  NX Client for Windows''

[img[img/neatx/client01.png]]

[img[img/neatx/client02.png]]

{{item1{使用 NX Client for Windows}}}

''@@color:blue;Ubuntu 10.04 連接操作畫面@@''

[img[img/neatx/client03.png]]

[img[img/neatx/client04.png]]

''@@color:blue;Ubuntu 12.04 連接操作畫面@@''

[img[img/neatx/nxclient01.png]]

[img[img/neatx/nxclient02.png]]

{{item1{問與答}}}
''Q1 : 使用 Neatx Client 連接 Neatx Server, 出現以下錯誤訊息''
{{{
NX> 148 Server capacity: not reached for user: root
NX> 105 Restoresession  --link="adsl" --backingstore="1" --encryption="1" --cache="16m" --images="64m" --shmem="1" --shpix="1" --strict="0" --composite="1" --media="0" --session="ud252" --type="unix-gnome" --geometry="900x600" --client="winnt" --keyboard="pc102/us" --id="d80666995a069f8350074440c8a3ca96" --resize="1"
NX> 500 Internal error
NX> 999 Bye.
NX> 280 Exiting on signal: 15
}}}

''A1 : 請將 /var/lib/neatx/sessions 目錄中的所有目錄刪除''
{{{
# ls -al /var/lib/neatx/sessions
總計 36
drwxrwxrwt 9 root    root    4096 2010-08-05 10:35 .
drwxr-xr-x 3 root    root    4096 2010-07-30 16:43 ..
drwx------ 4 root    root    4096 2010-08-04 16:44 006585FE6EE2A053295C846CEDA16F7F
drwx------ 4 root    root    4096 2010-08-05 16:21 1D1C7CDF934BB553676CDBD733A2A294
drwx------ 4 student student 4096 2010-07-30 17:08 266ABDEA5E1803E07AB389F3BE9FF293
drwx------ 4 root    root    4096 2010-08-02 22:41 6D6500B727CEB50301FDB3C74644ECF5
drwx------ 4 root    root    4096 2010-08-01 23:54 7A0E095521405668692794F394E675F6
drwx------ 4 root    root    4096 2010-08-02 23:37 F191AAE99A6AF7575080558055177745
drwx------ 4 student student 4096 2010-07-30 17:17 FFAACB2E98C6300C7E5B9989266E1FF0

# sudo rm /var/lib/neatx/sessions/*
rm: 無法建立目錄「/var/lib/neatx/sessions/006585FE6EE2A053295C846CEDA16F7F」: 是個目錄
rm: 無法建立目錄「/var/lib/neatx/sessions/1D1C7CDF934BB553676CDBD733A2A294」: 是個目錄
rm: 無法建立目錄「/var/lib/neatx/sessions/266ABDEA5E1803E07AB389F3BE9FF293」: 是個目錄
rm: 無法建立目錄「/var/lib/neatx/sessions/6D6500B727CEB50301FDB3C74644ECF5」: 是個目錄
rm: 無法建立目錄「/var/lib/neatx/sessions/7A0E095521405668692794F394E675F6」: 是個目錄
rm: 無法建立目錄「/var/lib/neatx/sessions/F191AAE99A6AF7575080558055177745」: 是個目錄
rm: 無法建立目錄「/var/lib/neatx/sessions/FFAACB2E98C6300C7E5B9989266E1FF0」: 是個目錄

# sudo rm -R /var/lib/neatx/sessions/*

}}}

///%neatx
//%/
''參考文章''
1. Touch 2.2.0 Sencha Docs
http://docs.sencha.com/touch/2.2.0/

{{item1{下載 Sencha 相關套件}}}
''1. Sencha Touch 2 套件下載網址''
http://www.sencha.com/products/touch/download/

''2. Sencha Cmd 套件下載網址''
http://www.sencha.com/products/sencha-cmd/download

''3. 下載 JRE'' 
http://www.oracle.com/technetwork/java/javase/downloads/jre7-downloads-1880261.html

''4. 透過 SSH 將 Sencha Touch 2, Sencha Cmd 及 JRE 套件, 複製到 AS996 主機的 /root 目錄中''

[img[img/sencha.png]]

<<toBalaNotes "1">>
{{item1{安裝 Sencha 套件}}}
''1. 登入 AS996''
{{{
$ sudo virsh console AS996
Connected to domain AS996
Escape character is ^]

student@AS996:~$ 
}}}
''[註]'' 執行以上命令, 需先啟動 Lab202

''2. 切換為 root 登入''
{{{
$ su -
Password: 
root@AS996:~# 
}}}
''[註]'' 密碼為 student

''3. 安裝 JRE''
{{{
# tar xvfz jre-7u21-linux-i586.tar.gz

# nano .bashrc
          :
export JAVA_HOME=/root/jre1.7.0_21/
export PATH=$PATH:$JAVA_HOME/bin

# exit
logout

$ su -
Password: 
root@AS996:~#  
}}}

''4. 安裝 Sencha Touch 2 SDK 套件 ''
將 Sencha Touch 2 SDK 套件解壓縮至 /var/www 目錄
{{{
# cd /var/www/

# sudo unzip /root/sencha-touch-2.2.0-gpl.zip 
}}}

''5. 安裝 Sencha Cmd  套件''
{{{
# cd

# unzip /root/SenchaCmd-3.1.1.274-linux.run.zip 
Archive:  SenchaCmd-3.1.1.274-linux.run.zip
  inflating: SenchaCmd-3.1.1.274-linux.run 

# chmod +x SenchaCmd-3.1.1.274-linux.run

# ./SenchaCmd-3.1.1.274-linux.run 
----------------------------------------------------------------------------
Welcome to the Sencha Cmd Setup Wizard.
The Sencha Cmd utilities are used to package and deploy Sencha applications.

----------------------------------------------------------------------------
Please read the following License Agreement. You must accept the terms of this 
agreement before continuing with the installation.

Press [Enter] to continue :
                            :
                            :
Do you accept this license? [y/n]: y

----------------------------------------------------------------------------
Please specify the directory where Sencha Cmd will be installed.

Installation Directory [/root/bin]: 
----------------------------------------------------------------------------
Setup is now ready to begin installing Sencha Cmd on your computer.

Do you want to continue? [Y/n]: y
----------------------------------------------------------------------------
Please wait while Setup installs Sencha Cmd on your computer.

 Installing
 0% ______________ 50% ______________ 100%
 #########################################

----------------------------------------------------------------------------
Setup has finished installing Sencha Cmd on your computer.

}}}

''6. 測試 Sencha Cmd''
{{{
$ cd /root/bin/Sencha/Cmd/3.1.1.274

$ ./sencha
Sencha Cmd v3.1.1.274
Sencha Cmd provides several categories of commands and some global switches. In
most cases, the first step is to generate an application based on a Sencha SDK
such as Ext JS or Sencha Touch:

    sencha -sdk /path/to/sdk generate app MyApp /path/to/myapp

Sencha Cmd supports Ext JS 4.1.1a and higher and Sencha Touch 2.1 and higher.

To get help on commands use the help command:

    sencha help generate app

For more information on using Sencha Cmd, consult the guides found here:

http://docs.sencha.com/ext-js/4-1/#!/guide/command
http://docs.sencha.com/ext-js/4-2/#!/guide/command
http://docs.sencha.com/touch/2-1/#!/guide/command

Options
  * --cwd, -cw - Sets the directory from which commands should execute
  * --debug, -d - Sets log level to higher verbosity
  * --nologo, -n - Suppress the initial Sencha Cmd version display
  * --plain, -pl - enables plain logging output (no highlighting)
  * --quiet, -q - Sets log level to warnings and errors only
  * --sdk-path, -s - The location of the SDK to use for non-app commands
  * --time, -ti - Display the execution time after executing all commands

Categories
  * app - Perform various application build processes
  * compass - Wraps execution of compass for sass compilation
  * compile - Compile sources to produce concatenated output and metadata
  * fs - Utility commands to work with files
  * generate - Generates models, controllers, etc. or an entire application
  * io - Create, deploy and manage applications on the Sencha.io cloud platform
  * iofs - Manage Files stored in the Sencha.io cloud platform
  * manifest - Extract class metadata
  * package - Manages local and remote packages
  * repository - Manage local repository and remote repository connections
  * theme - Commands for low-level operations on themes

Commands
  * ant - Invoke Ant with helpful properties back to Sencha Cmd
  * build - Builds a project from a legacy JSB3 file.
  * config - Load a properties file or sets a configuration property
  * help - Displays help for commands
  * js - Executes arbitrary JavaScript file(s)
  * upgrade - Upgrades Sencha Cmd
  * which - Displays the path to the current version of Sencha Cmd
}}}
''7. 再次 root 帳號重新登入''
{{{
# exit
logout
student@AS996:~$ su -
Password: 
root@AS996:~# 
}}}
<<toBalaNotes "2">>
{{item1{建立 Sencha 網站專案}}}

''1. 建立  網站專案''
{{{
# sencha -sdk /var/www/touch-2.2.0/ generate app MyApp /var/www/myapp
}}}

''2. 連接 myapp 網站''

[img[img/sencha01.png]]

<<toBalaNotes "3">>
{{item1{修改 myapp 首頁}}}
{{{
$ sudo nano /var/www/myapp/app/view/Main.js 
Ext.define('MyApp.view.Main', {
    extend: 'Ext.tab.Panel',
    xtype: 'main',
    requires: [
        'Ext.TitleBar',
        'Ext.Video'
    ],
    config: {
        tabBarPosition: 'bottom',

        items: [
            {
                title: 'Welcome',
                iconCls: 'home',

                styleHtmlContent: true,
                scrollable: true,

                items: {
                    docked: 'top',
                    xtype: 'titlebar',
                    title: 'Welcome to Sencha Touch 222'
                },

                html: [
                  "I changed the default <b>HTML Contents</b> to something different!"
                ].join("")
            },
            {
                title: 'Get Started',
                iconCls: 'action',

                items: [
                    {
                        docked: 'top',
                        xtype: 'titlebar',
                        title: 'Getting Started'
                    },
                    {
                        xtype: 'video',
                        url: 'http://av.vimeo.com/64284/137/87347327.mp4?token=1330978144_f9b698fea38cd408d52a2393240c896c',
                        posterUrl: 'http://b.vimeocdn.com/ts/261/062/261062119_640.jpg'
                    }
                ]
            }
        ]
    }
});

}}}

''myapp 網站檔案說明''
{{{
    app - The directory containing the Models, Views, Controllers, and Stores for your app.
    app.js - The main JavaScript entry point for your app.
    app.json - The configuration file for your app.
    index.html - The HTML file for your app.
    packager.json - The configuration file used by Sencha Cmd for creating native packages for your application.
    resources - The directory containing the CSS and the images for your app
}}}

///%1
//%/

///%2
//%/

///%3
//%/
<<list shadowed>>
<<forEachTiddler
    where
       'tiddler.tags.contains("文章整理")'

    sortBy
       'tiddler.title.toUpperCase()'

    write '" [["+tiddler.title+" ]] \"view ["+tiddler.title+"]\" [["+tiddler.title+"]] "'

        begin '"<<tabs txtMyAutoTab "'

        end '">"+">"'

        none '"//No tiddler tagged with \"文章整理\"//"'
>>
''參考文章''
1. Monitor Linux file system events with inotify
http://www.ibm.com/developerworks/linux/library/l-inotify/index.html?ca=drs
2. 強大的檔案控管機制-Inotify
http://daydreamer.idv.tw/rewrite.php/read-6.html
3. Monitoring file system events with inotify, incron and authctl (必讀)
http://andries.filmer.nl/kb/Monitoring-file-system-events-with-inotify,-incron-and-authctl/129
4. inotifywait(1) - Linux man page
http://linux.die.net/man/1/inotifywait

inotify 官方網站 : http://inotify.aiken.cz/?section=common&page=home&lang=en

{{item1{認識 inotify}}}
inotify 創作人 Robert Love 在 Linux Journal 上所說的一段話
{{{
inotify is a file change notification system—a kernel feature that allows applications to request the monitoring of a set of files against a list of events. When the event occurs, the application is notified. To be useful, such a feature must be simple to use, lightweight with little overhead and flexible. It should be easy to add new watches and painless to receive notification of events.
}}}
上面那段話簡單的來說, inotify 可以幫助你即時的觀測檔案的改變,並回送通知給你,而 inotify 把檔案狀態的變化分為以下十類
{{{
1. IN_ACCESS:File was read from.
2. IN_MODIFY:File was written to.
3. IN_ATTRIB:File's metadata (inode or xattr) was changed.
4. IN_CLOSE_WRITE:File was closed (and was open for writing).
5. IN_CLOSE_NOWRITE:File was closed (and was not open for writing).
6. IN_OPEN:File was opened.
7. IN_MOVED_FROM:File was moved away from watch.
8. IN_MOVED_TO:File was moved to watch.
9. IN_DELETE File:was deleted.
10. IN_DELETE_SELF:The watch itself was deleted.
}}}
所以不論 讀, 寫, 開啟, 刪除,或 attribute 改變, 都可以從 inotify 收到通知,使用這樣的機制好處是,你再也不用去 polling 你所有要監測的檔案了,這對 embedded device 來說是十分重要的功能,因為它可以簡化你所有監測的動作並且節省 device 的資源

''[註]'' linux kernel在 2.6.13 以後的版本可以使用 Inotify

{{item1{安裝 inotify (HDP120)}}}
{{{
$ apt-get install inotify-tools
}}}

{{item1{實作一次監控}}}

''1. 在背景執行一次監控''
{{{
$ mkdir  /tmp/ABird
$ inotifywait -q  /tmp/ABird &                            
[1] 873
}}}

''[註]'' -q: quiet 簡化輸出的訊息

''2. 在監控目錄中產生檔案''
{{{
$ touch  /tmp/ABird/x
/tmp/ABird/ CREATE x
[1]+  Done                    inotifywait -q /tmp/ABird
}}}

{{item1{全程監控}}}

''1. 監控的目錄是 /tmp/ABird/''
{{{
$ inotifywait -mrq /tmp/ABird/ &
}}}

-m: monitor 如果不使用的話,就僅僅會顯示一筆變動記錄後就結束!
-r: recursive 包括底下所有子路徑

''2. 產生新檔案''
{{{
$ touch /tmp/ABird/y
/tmp/ABird/ CREATE y
/tmp/ABird/ OPEN y
/tmp/ABird/ ATTRIB y
/tmp/ABird/ CLOSE_WRITE,CLOSE y
}}}

''3. 結束監控''
{{{
$ ps aux | grep inotifywait
root       876  0.0  0.0   1884   544 ttyS0    S    11:53   0:00 inotifywait -mrq /tmp/ABird
root       894  0.0  0.1   3328   800 ttyS0    S+   11:59   0:00 grep --color=auto inotifywait

$ kill -9 876
[1]+  已砍掉               inotifywait -mrq /tmp/ABird
}}}

{{item1{監控指定事件}}}

''1. 監控的目錄是 /tmp/ABird/''
{{{
$ inotifywait -mrq -e create,move,delete,modify  /tmp/ABird/ &
}}}

''[註]''
inotifywait -mrq -e create,move,delete,modify  /tmp/ABird/ & 這行指令用 -e,只顯示 create,move,delete,modify 四種事件,當有事件發生時,輸出的訊息有三個欄位,如下:
{{{
/tmp/ABird/ CREATE z
}}}
分別是 "路徑" "事件狀態" "檔案名稱",所以也可以讀入特定的欄位來進行處理。另外,Inotify 預設只能夠監看 8K(8192) 以下的檔案數量,如果檔案數量超出的話,修改 /proc/sys/fs/inotify/max_user_watches 的值就可以增加了

''2. 功能測試''
{{{
$ inotifywait -mrq -e create  /tmp/ABird/ &
[1] 899
# touch /tmp/ABird/y
# touch /tmp/ABird/z
/tmp/ABird/ CREATE z
}}}

''[註]'' y 是已存在的檔案

''3. 結束監控''
{{{
$ ps aux | grep inotifywait
root       876  0.0  0.0   1884   544 ttyS0    S    11:53   0:00 inotifywait -mrq /tmp/ABird
root       894  0.0  0.1   3328   800 ttyS0    S+   11:59   0:00 grep --color=auto inotifywait

$ kill -9 876
[1]+  已砍掉               inotifywait -mrq /tmp/ABird
}}}

{{item1{目錄同步}}}

如果有需要兩個目錄要完全一模一樣,可以利用 rsync:
{{{
inotifywait -mrq -e create,move,delete,modify /home/abc | while read DIR ACT FILE ; do rsync -avq --delete /home/abc /home/def ; done
}}}

這樣當 /home/abc 有任何變動時, /home/def 會即時跟著變動。

{{item1{目錄稽核}}}
Log the /home/andries/myProject directory after being opened in writeable mode
{{{
$ inotifywait -mrq --format '%w%f' -e close_write /home/andries/myProject | while read file;do echo $file >> /var/log/inotify; done &
}}}

--format '%w%f' : Echo the full path instead of the file only (default).

{{item1{備份系統目錄}}}
It can be very useful to make an automatic backup for each file in a directory with a timestamp.
{{{
$ mkdir /var/backups/inotify
}}}

An example to back up all /etc/ files.
{{{
$ inotifywait -mrq --format '%w%f' -e close_write /etc | while read file;do \
    cp --parents $file /var/backups/inotify;mv /var/backups/inotify$file \ 
    /var/backups/inotify$file-`date +'%Y-%m-%d_%H:%M'`; done &
}}}
* Option cp --parents uses the full source file name under DIRECTORY
* Command mv moves the $file to the $file with a date-stamp date +'%Y-%m-%d_%H:%M.


<<toBalaNotes "1">>

{{item1{incron (背景服務)}}}
If you want some monitoring, backup events permanent then incron is very useful.

The inotify cron daemon (incrond) is a daemon which monitors filesystem events and executes commands defined in system and user tables. Its use is generally similar to cron(8).

''安裝 incron''
{{{
# apt-get install incron
Reading package lists... Done
Building dependency tree       
Reading state information... Done
The following NEW packages will be installed:
  incron
0 upgraded, 1 newly installed, 0 to remove and 25 not upgraded.
Need to get 113kB of archives.
After this operation, 377kB of additional disk space will be used.
Get:1 http://tw.archive.ubuntu.com/ubuntu/ lucid/universe incron 0.5.9-4 [113kB]
Fetched 113kB in 14s (7620B/s) 
[master fbbad72] saving uncommitted changes in /etc prior to apt run
 15 files changed, 263 insertions(+), 3 deletions(-)
debconf: delaying package configuration, since apt-utils is not installed
Selecting previously deselected package incron.
(Reading database ... 40781 files and directories currently installed.)
Unpacking incron (from .../incron_0.5.9-4_i386.deb) ...
Processing triggers for man-db ...
Setting up incron (0.5.9-4) ...
Adding group `incron' (GID 108) ...
Done.
update-rc.d: warning: incron stop runlevel arguments (0 1 6) do not match LSB Default-Stop values (1)
 * Starting File system events scheduler                                 [ OK ] 

[master c940cdc] committing changes in /etc after apt run
 14 files changed, 154 insertions(+), 0 deletions(-)
 create mode 100644 incron.allow
 create mode 100644 incron.conf
 create mode 100644 incron.deny
 create mode 100755 init.d/incron
 create mode 120000 rc0.d/K20incron
 create mode 120000 rc1.d/K20incron
 create mode 120000 rc2.d/S20incron
 create mode 120000 rc3.d/S20incron
 create mode 120000 rc4.d/S20incron
 create mode 120000 rc5.d/S20incron
 create mode 120000 rc6.d/K20incron
}}}

''Create your backups''
As a example I have made a small script to backup all files in the etc and myProject directory.
{{{
$ nano /root/inotify.sh

 #!/bin/sh
 
 # Create a inotify backup dir (if not exists)
 #
 mkdir /var/backups/inotify
 
 # Make a copy off the full path and file
 #
 cp -p --parents $1  /var/backups/inotify
 
 # move the file to a file with datetime-stamp
 #
 mv /var/backups/inotify$1 /var/backups/inotify$1_`date +'%Y-%m-%d_%H:%M'`
}}}

Make the file executable for root
{{{
$  chmod 755 /root/inotify.sh
}}}

''Open''
{{{
$ incrontab -e

 /etc IN_CLOSE_WRITE,IN_MODIFY /root/inotify.sh $@/$# 
 /home/andries/myProject IN_CLOSE_WRITE /root/inotify.sh $@/$# 
}}}

As you can see you can do many more ;)
* Manpage: http://manpages.ubuntu.com/manpages/lucid/man5/incrontab.5.html

<<toBalaNotes "2">>


///%1
//%/

///%2
//%/
''參考文章''
1. Linux - kvm - How to use USB devices ?
http://dougsland.livejournal.com/93076.html
2. Improved Support for USB with Virt-Manager 0.7
http://www.linux-kvm.com/content/improved-support-usb-virt-manager-07

{{item1{圖形介面操作}}}

[img[img/kvm/kvmusb01.png]]

[img[img/kvm/kvmusb02.png]]

{{item1{命令操作}}}
''0)  Close the Virtual Machine Manager or any instance of VM''

''1) Locate vendor/product id from your device''
{{{
shell> lsusb
Bus 002 Device 001: ID 1d6b:0002 Linux Foundation 2.0 root hub
Bus 007 Device 001: ID 1d6b:0001 Linux Foundation 1.1 root hub
Bus 006 Device 004: ID 1d6b:0001 Linux Foundation 1.1 root hub
Bus 006 Device 003: ID 1d6b:0001 Linux Foundation 1.1 root hub
Bus 006 Device 002: ID 1d6b:0001 Linux Foundation 1.1 root hub
Bus 006 Device 001: ID 1d6b:0001 Linux Foundation 1.1 root hub
Bus 005 Device 001: ID 1d6b:0001 Linux Foundation 1.1 root hub
Bus 001 Device 002: ID eb1b:3212 "blah Technology, Inc. "
Bus 001 Device 001: ID 1d6b:0002 Linux Foundation 2.0 root hub
Bus 004 Device 001: ID 1d6b:0001 Linux Foundation 1.1 root hub
Bus 003 Device 001: ID 1d6b:0001 Linux Foundation 1.1 root hub
}}}

Once you know your vendor and product id:
{{{
Vendor id:   0xeb1b
Product id: 0x3212
}}}

''2) Edit the xml config file from your VM''

Example:
{{{
shell> vi /etc/libvirt/qemu/NAME_OF_YOUR_VIRTUAL_MACHINE_HERE.xml

Add the following lines:

Example:

<devices> (do not include this line)
    <emulator>/usr/bin/qemu-kvm</emulator> (do not include this line)
     <hostdev mode='subsystem' type='usb'>
     <source>
            <vendor id='0xeb1b'/>
            <product id='0x3212'/>
    </source>
    </hostdev>
}}}

''3) Save and restart libvirt''
{{{
shell> /etc/init.d/libvirtd restart
}}}

''4) Now just start your VM and test if your usb was detected:''
{{{
shell from VM> lsusb
Bus 001 Device 002: ID eb1b:3212 "blah Technology, Inc. "
Bus 001 Device 001: ID 1d6b:0001 Linux Foundation 1.1 root hub
}}}
<<toBalaNotes "usb">>




///%usb
//%/
[img[img/Lab/Lab201.png]]

{{item1{開始建置}}}

''1. 下載 Lab201.zip''
將 Lab201.zip 解壓縮至 /home/student 目錄

''2.建置 Lab201 網路系統架構''
{{{
$ sudo ./LabManager.sh Lab201/

[ KVM 虛擬電腦管理 ]
a) 建立  d) 刪除  q) 離開 > a

=> 開始建立虛擬電腦

建立 NS100 虛擬電腦 ? (y/n) y
複製 NS100.vmdk 檔案 ... 成功
建立 NS100 虛擬電腦完成

建立 NS660 虛擬電腦 ? (y/n) y
複製 NS660.vmdk 檔案 ... 成功
建立 NS660 虛擬電腦完成

建立 NS88 虛擬電腦 ? (y/n) y
複製 NS88.vmdk 檔案 ... 成功
建立 NS88 虛擬電腦完成
}}}

<<toBalaNotes "1">>

{{item1{網路測通}}}

''1. 啟動 Lab101 及 Lab201 所有虛擬系統''
{{{
$ sudo ./LabStart.sh Lab101/

$ sudo ./LabStart.sh Lab201/
}}}

''2. 登入 NS88''
{{{
$ sudo virsh console NS88
Connected to domain NS88
Escape character is ^]

NS88 login: root
Password:

root@NS88:~# 
}}}

''3. 檢測 DNS 系統''
{{{
root@NS88:~# nslookup
> server
Default server: 192.168.88.5
Address: 192.168.88.5#53
> set type=soa
> it66.kvm.
Server:		192.168.88.5
Address:	192.168.88.5#53

Non-authoritative answer:
it66.kvm
	origin = NS660.it66.kvm
	mail addr = admin.NS660.it66.kvm
	serial = 1
	refresh = 43200
	retry = 3600
	expire = 7200
	minimum = 86400

Authoritative answers can be found from:
it66.kvm	nameserver = NS660.it66.kvm.
> apple.com.
Server:		192.168.88.5
Address:	192.168.88.5#53

Non-authoritative answer:
apple.com
	origin = gridmaster-ib.apple.com
	mail addr = hostmaster.apple.com
	serial = 2010074917
	refresh = 1800
	retry = 900
	expire = 2016000
	minimum = 86500

Authoritative answers can be found from:
apple.com	nameserver = nserver.apple.com.
apple.com	nameserver = nserver.euro.apple.com.
apple.com	nameserver = nserver.asia.apple.com.
apple.com	nameserver = nserver2.apple.com.
apple.com	nameserver = nserver4.apple.com.
apple.com	nameserver = nserver3.apple.com.
> exit

}}}

''4. 檢測 Master Browser 系統''
{{{
root@NS88:~# nmblookup -M kvm
querying kvm on 192.168.88.255
192.168.88.5 kvm<1d>

root@NS88:~# nmblookup NS100 
querying NS100 on 192.168.88.255
192.168.100.5 NS100<00>

root@NS88:~# nmblookup NS88 
querying NS88 on 192.168.88.255
192.168.88.5 NS88<00>

root@NS88:~# cat /var/lib/samba/browse.dat 
"KVM"                     c0001000 "NS88"                        "KVM"
"NS88"                    40859a03 "NS88 Samba Server"           "KVM"
"NS100"                   008d9a03 "NS100 Samba Server"          "KVM"
"NS660"                   00859a03 "NS660 Samba Server"          "KVM"
}}}

<<toBalaNotes "2">>
[img[img/Lab/Lab202.png]]

{{item1{開始建置}}}

''1. 下載 Lab202.zip''
將 Lab202.zip 解壓縮至 /home/student 目錄

''2.建置 Lab202 網路系統架構''
{{{
$ sudo ./LabManager.sh Lab202/

[ KVM 虛擬電腦管理 ]
a) 建立  d) 刪除  q) 離開 > a

=> 開始建立虛擬電腦

建立 AS100 虛擬電腦 ? (y/n) y
複製 AS100.vmdk 檔案 ... 成功
建立 AS100 虛擬電腦完成
}}}

<<toBalaNotes "ˇ3">>
{{item1{網路測通}}}

''1. 啟動 Lab202 所有虛擬系統''
{{{
$ virsh start AS100
}}}

''2. 登入 AS100 主機''
{{{
$ sudo virsh console AS100
Connected to domain AS100
Escape character is ^]

$
}}}

''3. 檢測網路系統''
{{{
$ ifconfig -a
eth1      Link encap:Ethernet  HWaddr 52:54:00:a0:08:86  
          BROADCAST MULTICAST  MTU:1500  Metric:1
          RX packets:0 errors:0 dropped:0 overruns:0 frame:0
          TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:1000 
          RX bytes:0 (0.0 B)  TX bytes:0 (0.0 B)
          Interrupt:10 Base address:0x4000 

lo        Link encap:Local Loopback  
          inet addr:127.0.0.1  Mask:255.0.0.0
          inet6 addr: ::1/128 Scope:Host
                                    :
 
上面顯示的資訊中, 如沒看到 eth0 這網卡名稱, 請執行以下命令 :

$ resetmac

上面命令執行後, 重新開機 

$ reboot

再次登入後, 執行以下命令 :

# ping www.hinet.net
PING www.hinet.net (202.39.224.7) 56(84) bytes of data.
64 bytes from 202-39-224-7.HINET-IP.hinet.net (202.39.224.7): icmp_seq=1 ttl=242 time=86.8 ms
64 bytes from 202-39-224-7.HINET-IP.hinet.net (202.39.224.7): icmp_seq=2 ttl=242 time=166 ms
64 bytes from 202-39-224-7.HINET-IP.hinet.net (202.39.224.7): icmp_seq=3 ttl=242 time=95.1 ms
64 bytes from 202-39-224-7.HINET-IP.hinet.net (202.39.224.7): icmp_seq=4 ttl=242 time=80.1 ms

--- www.hinet.net ping statistics ---
4 packets transmitted, 4 received, 0% packet loss, time 3006ms
rtt min/avg/max/mdev = 80.163/107.249/166.830/34.809 ms

}}}
<<toBalaNotes "4">>


{{{
$ sudo ./cpbok.sh 

192.168.100.0/24 
--------------------------
192.168.100.1 跳過 (本機位址)
192.168.100.8 複製成功
192.168.100.66 無法複製, 因不是 Ubuntu 系統
192.168.100.88 無法複製, 因不是 Ubuntu 系統
192.168.100.254 無法複製, 因不是 Ubuntu 系統

192.168.66.0/25 
--------------------------
192.168.66.1 跳過 (本機位址)
192.168.66.11 沒有安裝 OpenSSH
192.168.66.126 無法複製, 因不是 Ubuntu 系統

192.168.88.0/24 
--------------------------
192.168.88.1 跳過 (本機位址)
192.168.88.11 沒有安裝 OpenSSH
192.168.88.254 無法複製, 因不是 Ubuntu 系統


}}}
{{item1{安裝 on cloud 9 雲端系統}}}

''1. 檢視 oc9bok 目錄結構'' 
{{{
$ cd oc9bok/
$ ll
總計 68
drwx------ 6 root root 4096 2011-08-29 13:36 ./
drwx------ 6 root root 4096 2011-08-29 13:58 ../
drwx------ 2 root root 4096 2011-08-29 13:36 cgi-bin/
drwx------ 2 root root 4096 2011-08-29 13:36 conf/
drwx------ 2 root root 4096 2011-08-29 13:36 demo/
drwx------ 2 root root 4096 2011-08-29 13:36 lib/
-rwx------ 1 root root 2956 2011-08-29 13:36 oc9boksetup.sh*
-rwx------ 1 root root 4393 2011-08-29 13:36 oc9bok.sh*
-rwx------ 1 root root  829 2011-08-29 13:36 oc9cgi.sh*
-rwx------ 1 root root 5515 2011-08-29 13:36 oc9SuperUser.sh*
-rwx------ 1 root root 3314 2011-08-29 13:36 oc9TestSystem.sh*
-rwx------ 1 root root 2954 2011-08-29 13:36 oc9UpdateDir.sh*
-rwx------ 1 root root 2039 2011-08-29 13:36 oc9UpdateLoginList.sh*
-rwx------ 1 root root 5480 2011-08-29 13:36 oc9User.sh*

}}}

''2. 開始安裝''
{{{
$ sudo ./oc9bok.sh
確定要安裝 Apache2 伺服器 (y/n) : y
Apache2 伺服器安裝成功
家目錄模組 (userdir) 安裝成功
設定檔 (/etc/apache2/httpd.conf) 複製成功
Apache2 伺服器重新啟動成功

確定要安裝 Samba 伺服器 (y/n) : y
}}}

''3. 進入主選單''
{{{

====================== on cloud 9 雲端系統 (V 0.2) =======================
 超級使用者資訊 (帳號未產生)

 本機資訊
   IP 位址 : 192.168.100.8
   Gateway 位址 : 192.168.100.254
   DNS 位址 : 168.95.1.1
==========================================================================
[1] 批次建立使用者帳號
[2] 批次刪除使用者帳號
[3] 批次更新使用者家目錄 (/home) 資料
[4] 更新使用者登入清單 (Samba)
[5] 學習評量
[6] 建立與刪除超級使用者帳號 (oc9root)
[7] 編輯設定檔 (/root/oc9bok/conf/oc9.conf)
[8] 離開

輸入代號, 執行所需的功能 : 

}}}

{{item1{on cloud 9 雲端系統操作}}}
''1. 編輯設定檔''
在主選單, 選擇 7, 開始設定檔編輯, 如下 :
{{{
<?xml version="1.0" encoding="utf-8" ?>
<oncloud9>
  <CloudAdmin>
    Title:私有知識雲管理者
    Date:2011-08-31
    SuperUser:雲行者
    LoginName:oc9root
    Password:goforwin
    EMail:oc9root@gmail.com
  </CloudAdmin>

  <!--
    0: 一般目錄
    1: 評量目錄
    2: 系統目錄
    3: 網站
  -->
  <CloudDir>
    <![CDATA[bok:0]]>
    <![CDATA[app:0]]>
    <![CDATA[exam:1]]>
    <![CDATA[sys:2]]>
    <![CDATA[www:3]]>
  </CloudDir>

  <CloudUser>
    lcj01:goforwin:張三逢:lcj01@gmail.com
    lcj02:goforwin:陳漢點:lcj02@gmail.com
    lcj03:goforwin:鄭如意:lcj03@gmail.com
    lcj04:goforwin:真抱歉:lcj04@gmail.com
  </CloudUser>

  <CloudDB>
    ServerIP:192.168.100.8
    ServerPort:5948
  </CloudDB>

  <CloudTest>
    SCJP1.5
    SCJP1.6
    LPIC
  </CloudTest>

</oncloud9>
}}}






///%1
//%/

///%2
//%/

///%ˇ3
//%/

///%4
//%/
本文網址 : http://www.ithome.com.tw/itadm/article.php?c=66894

教育部日前展現教育部應用RFID協助學生安全計畫成果,2年來透過多所大專院校,在24所一般小學及特教學校進行測試,包括如校園危險區域警示、出缺勤追蹤、校外教學管理、學生體溫量測等應用。

教育部日前展現教育部應用RFID協助學生安全計畫成果,2年來透過多所大專院校如臺北科技大學、中正大學、高雄師範大學、明志科技大學、中興大學及成功大學,在24所一般小學及特教學校進行測試,包括如校園危險區域警示、出缺勤追蹤、校外教學管理、學生體溫量測等應用。

學生身上在配備主動式RFID標籤後,校方可在校園死角或偏僻處如地下室、頂樓加裝RFID讀取設備,當學生接近這些有安全風險的區域時,系統會示警並通報老師和警衛。中興大學則在教室加裝RFID讀取設備推出防詐騙服務,當學生家長接到詐騙電話時,可以透過網站查詢,或發簡訊向系統查詢,系統也會立刻回報學生所在位置。

明志科技大學則設計了一套校外教學安全管理做法,校外教學時讓學生配戴主動式RFID,老師就可以設定學生與老師的距離,當學生脫隊遠離時,系統也能在帶隊老師攜帶的PDA上示警。文⊙蔡宛儒 


本文網址 : http://www.ducea.com/2006/05/30/managing-apache2-modules-the-debian-way/

''The Apache2 HTTP Server is a modular program'', where we can choose its functionality by including in the server a set of modules. The modules can be statically compiled into the httpd binary when the server is built. Alternatively, modules can be compiled as ''Dynamic Shared Objects (DSOs)'' that exist separately from the main httpd binary file.

Normally enabling one particular apache DSO module will involve editing the main apache configuration file and adding a ''LoadModule'' line to enable the loading of the particular module. Depending from the module itself, we might need to add also some configuration directives. This will work fine on Debian also, but I am going to show you the Debian particular method of managing apache2 modules.

Regardless of the apache MPM (Multi-Processing Modules) you are using: apache2-mpm-prefork, apache2-mpm-worker or apache2-mpm-perchild after the installation you will end up with some default modules: some already enabled and some ready to be used. Opposed to a RedHat based system for example (where they will try to enable all the possible modules) the Debian package will enable by default only a very small amount of modules.

''內建在 Apache 核心的模組''
{{{
$ sudo apache2 -l
Compiled in modules:
  core.c
  mod_log_config.c
  mod_logio.c
  worker.c
  http_core.c
  mod_so.c
}}}

''[註]'' These modules can’t be disabled without recompiling the apache package

The Ubuntu apache2 package provides a unique mode of managing modules. All the loading and configuration related entries are found in individual files inside folder ''/etc/apache2/mods-available/''. Here we will find files like module_name.load (and if needed module_name.conf). Also all additional installed modules will place their configuration files in the same place.

Inside the folder ''/etc/apache2/mods-enabled/'' we will find all the enabled modules. Here we will find symlinks to the files from mods_available for all the enabled modules. Only the modules found in this folder will be enabled at run time.

''列出已啟動的動態模組''
{{{
$ ls /etc/apache2/mods-enabled/
alias.conf            authz_user.load  dir.conf          reqtimeout.conf
alias.load            autoindex.conf   dir.load          reqtimeout.load
auth_basic.load       autoindex.load   env.load          setenvif.conf
authn_file.load       cgid.conf        mime.conf         setenvif.load
authz_default.load    cgid.load        mime.load         status.conf
authz_groupfile.load  deflate.conf     negotiation.conf  status.load
authz_host.load       deflate.load     negotiation.load
}}}

For example the configuration file for mod_dir includes only one line to load the module:
{{{
$ cat /etc/apache2/mods-enabled/dir.load
LoadModule dir_module /usr/lib/apache2/modules/mod_dir.so
}}}

{{item1{Apache2 動態模組管理命令}}}
So in order to enable one additional module we will only have to create the proper symlinks from the mods-available to the mod-enabled files… But why not use the little tools Debian provides us for this:

''a2enmod'': enables an apache2 module (this does nothing else but creates the proper links to the module .load and .conf files). 
''a2dismod'': disables an apache2 module (removes the links from mod-enabled for the module). 

''命令實作''
1. Running ''a2enmod'' without any parameter will show the possible choices:
{{{
$ sudo a2enmod
Your choices are: actions alias asis auth_basic auth_digest authn_alias authn_anon authn_dbd authn_dbm authn_default authn_file authnz_ldap authz_dbm authz_default authz_groupfile authz_host authz_owner authz_user autoindex cache cern_meta cgi cgid charset_lite dav dav_fs dav_lock dbd deflate dir disk_cache dump_io env expires ext_filter file_cache filter headers ident imagemap include info ldap log_forensic mem_cache mime mime_magic negotiation proxy proxy_ajp proxy_balancer proxy_connect proxy_ftp proxy_http proxy_scgi reqtimeout rewrite setenvif speling ssl status substitute suexec unique_id userdir usertrack version vhost_alias
Which module(s) do you want to enable (wildcards ok)?
}}}

''[註]'' 直接按 Enter 鍵, 結束 a2enmod 命令執行
 
2. Running ''a2dismod'' without any parameter again will show us the list of enabled modules and allow to choose one:
{{{
$ sudo a2dismod
Your choices are: alias auth_basic authn_file authz_default authz_groupfile authz_host authz_user autoindex cgid deflate dir env mime negotiation reqtimeout setenvif status
Which module(s) do you want to disable (wildcards ok)?
}}}

''[註]'' a2dismod 命令會先將所有已啟動的動態模組列出, 然後詢問你要停止那個動態模組 

Dont forget to reload the apache daemon, after making any changes to the list of enabled modules:
{{{
/etc/init.d/apache2 reload
}}}

''啟動 mod_ssl 動態模組''
{{{
$ sudo a2enmod ssl
Enabling module ssl.
See /usr/share/doc/apache2.2-common/README.Debian.gz on how to configure SSL and create self-signed certificates.
Run '/etc/init.d/apache2 restart' to activate new configuration!
}}}

執行完啟動命令後, 在 /etc/apache2/mods-enabled/ 目錄中, 會出現 ssl.conf 及 ssl.load 這二個檔案, 如下 :
{{{
$ ls /etc/apache2/mods-enabled/
alias.conf            autoindex.conf  env.load          setenvif.load
alias.load            autoindex.load  mime.conf         ssl.conf
auth_basic.load       cgid.conf       mime.load         ssl.load
authn_file.load       cgid.load       negotiation.conf  status.conf
authz_default.load    deflate.conf    negotiation.load  status.load
authz_groupfile.load  deflate.load    reqtimeout.conf
authz_host.load       dir.conf        reqtimeout.load
authz_user.load       dir.load        setenvif.conf
}}}

''停止 mod_ssl 動態模組''
{{{
$ sudo a2dismod ssl
Module ssl disabled.
Run '/etc/init.d/apache2 restart' to activate new configuration!
}}}
<<toBalaNotes "modules">>


///%modules
//%/
''參考文章''
1. Create Ajax applications for the mobile Web
http://www.ibm.com/developerworks/opensource/library/wa-aj-mobileajax/
2. Android and iPhone browser wars, Part 1: WebKit to the rescue
http://www.ibm.com/developerworks/opensource/library/os-androidiphone1/
3. Android and iPhone browser wars, Part 2: Build a browser-based application for iPhone and Android
http://www.ibm.com/developerworks/opensource/library/os-androidiphone2/
4. Creating mobile Web applications with HTML 5, Part 4: Using Web Workers to speed up your mobile Web applications
http://www.ibm.com/developerworks/xml/library/x-html5mobile4/
5. Mobile web application framework match-up, Part 3: Boost your next mobile web app with jQTouch
http://www.ibm.com/developerworks/web/library/wa-jqtouch/index.html
''參考文章''
1. How you can use qemu/kvm base images to be more productive (Part 1)
http://www.linux-kvm.com/content/how-you-can-use-qemukvm-base-images-be-more-productive-part-1

{{item1{硬碟映像檔儲存目錄建立}}}

[img[img/kvm/vmstorage01.png]]

[img[img/kvm/vmstorage02.png]]

[img[img/kvm/vmstorage03.png]]

[img[img/kvm/vmstorage04.png]]

<<toBalaNotes "1">>

{{item1{產生硬碟映像檔}}}

[img[img/kvm/vmdkimg00.png]]

[img[img/kvm/vmdkimg01.png]]

[img[img/kvm/vmdkimg02.png]]

{{item1{檢視硬碟映像檔資訊}}}
{{{
# kvm-img info -f qcow2 US1041_NS.img 
image: US1041_NS.img
file format: qcow2
virtual size: 4.0G (4294967296 bytes)
disk size: 983M
cluster_size: 65536
}}}
<<toBalaNotes "2">>

///%1
//%/

///%2
//%/
''參考文章''
1. Memory management in VMware vSphere (必讀, VMware 一樣有支援 Balloon 及 KSM)
http://blogs.vmware.com/virtualreality/2011/02/hypervisor-memory-management-done-right.html

{{item1{Transparent Page Sharing 或 Kernel Samepage Merging}}}
 Think of it as de-duplication for your memory.  During periods of idle CPU activity, ESXi scans memory pages loaded by each VM to find matching pages that can be shared. The memory savings can be substantial, especially when the same OS or apps are loaded in multiple guests, as is the case with VDI.  Transparent Page Sharing has a negligible effect on performance (sometimes it even improves guest performance) and users can tune ESXi parameters to speed up scanning if desired.  Also, despite claims by our competitors, Transparent Page Sharing will in fact work with large memory pages in guests by breaking those pages into smaller sizes to enable page sharing when the host is under memory pressure.
    
{{item1{Guest Ballooning}}}
This is where ESXi achieves most of its memory reclamation.  When the ESXi hypervisor needs to provide more memory for VMs that are just powering on or getting busy, it asks the guest operating systems in other VMs to provide memory to a balloon process that runs in the guest as part of the VMware Tools.  ESXi can then loan that “ballooned” memory to the busy VMs.  The beauty of ballooning is that it’s the guest OS, not ESXi, that decides which processes or cache pages to swap out to free up memory for the balloon.  The guest, whether it’s Windows or Linux, is in a much better position than the ESXi hypervisor to decide which memory regions it can give up without impacting performance of key processes running in the VM.
    
{{item1{Hypervisor Swapping}}}
Any hypervisor that permits memory oversubscription must have a method to cope with periods of extreme pressure on memory resources.  Ballooning is the preferred way to reclaim memory from guests, but in the time it takes for guests to perform the in-guest swapping involved, other guests short on memory would experience freezes, so ESXi employs hypervisor swapping as a fast-acting method of last resort.  With this technique, ESXi swaps its memory pages containing mapped regions of VM memory to disk to free host memory.  Reaching the point where Hypervisor swapping is necessary will impact performance, but vSphere supports swapping to increasingly common solid state disks, which testing shows can cut the performance impact of swapping by a factor of five.
    
{{item1{Memory Compression}}}
To reduce the impact of hypervisor swapping, vSphere 4.1 introduced memory compression.  The idea is to delay the need to swap hypervisor pages by compressing the memory pages managed by ESXi – if two pages can be compressed to use only one page of physical RAM, that’s one less page that needs to be swapped.  Because the compression/decompression process is so much faster than disk access, performance is preserved.

<<toBalaNotes "1">>


///%1
//%/
FloppyFW 已內建 NAT 功能, 並且已開啟 IP 路由, 這二個功能是在 firewall.init 中設定, 如要自行設定, 那就必需取消 firewall 功能

{{item1{NAT - 暫時設定}}}

''1. 啟動 ffwNAT 虛擬主機''
{{{
$ virsh start ffwNAT
}}}

''2. 登入 ffwNAT 虛擬主機''
{{{
$ virsh console ffwNAT
}}}

''3. 啟動 NAT 功能''
{{{
$ iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE

$ echo 1 > /proc/sys/net/ipv4/ip_forward 

$ route add -net 192.168.88.0 netmask 255.255.255.0 gw 192.168.100.20 dev eth1

$ route add -net 192.168.99.0 netmask 255.255.255.0 gw 192.168.100.10 dev eth1
}}}

{{item1{NAT - 永久設定}}}
執行 ''setffw.sh'' 程式, 會自動掛載 image 檔, 修改 network.ini 設定檔後, 會自動卸載 image 檔

''1. 編輯 network.ini 設定檔''
{{{
$ setffw.sh  -n  ffwNAT
}}}

''2. 在 network.ini 最後, 加入以下設定''
{{{
iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE

echo 1 > /proc/sys/net/ipv4/ip_forward 

route add -net 192.168.88.0 netmask 255.255.255.0 gw 192.168.100.20 dev eth1

route add -net 192.168.99.0 netmask 255.255.255.0 gw 192.168.100.10 dev eth1
}}}

''3. 重新啟動 ffwNAT''

<<toBalaNotes "1">>

{{item1{靜態路由 - 暫時設定}}}
''1. 啟動 R1 虛擬主機''
{{{
$ virsh start ffwR1
}}}

''2. 登入 R1 虛擬主機''
{{{
$ virsh console ffwR1
}}}

''3. 啟動 Routing 功能''
{{{
$ echo 1 > /proc/sys/net/ipv4/ip_forward 

$ route add -net 192.168.88.0 netmask 255.255.255.0 gw 192.168.100.20 dev eth0   (ffwR1 的設定)

or

$ route add -net 192.168.99.0 netmask 255.255.255.0 gw 192.168.100.10 dev eth0  (ffwR2  的設定)

}}}

{{item1{靜態路由 - 永久設定}}}
執行 ''setffw.sh'' 程式, 會自動掛載 image 檔, 修改 network.ini 設定檔後, 會自動卸載 image 檔

''1. 編輯 network.ini 設定檔''
{{{
$ setffw.sh  -n  ffwR1
}}}

''2. 在 network.ini 最後, 加入以下設定''
{{{
echo 1 > /proc/sys/net/ipv4/ip_forward 

route add -net 192.168.88.0 netmask 255.255.255.0 gw 192.168.100.20 dev eth0  (ffwR1 的設定)

或

route add -net 192.168.99.0 netmask 255.255.255.0 gw 192.168.100.10 dev eth0  (ffwR2  的設定)
}}}

<<toBalaNotes "2">>

{{item1{network.ini 內容}}}
{{{
# nano /media/floppy0/network.ini 
#!/bin/sh

# $Id: network.ini,v 1.9 2005/08/13 10:02:16 thomasez Exp $

# Remember to set up the network interface card with IRQ and base address
# in syslinux.cfg if nessesary.

#
# Grabbing the config.
#
. /etc/config

#
# Grabbing the function library.
#
. /etc/functions.inc

ifconfig lo 127.0.0.1


#
# Brad wanted these next 5 lines.
#
cat > /etc/inside.info <<-EOF
	INSIDE_DEVICE=$INSIDE_DEV
	INSIDE_IP=$INSIDE_IP
	INSIDE_NETMASK=$INSIDE_NETMASK
EOF

#
# Let's make things easier for the users and find this automagically.
#
[ -n "$INSIDE_IP" ] && [ -n "$INSIDE_NETMASK" ] && {
	eval `ipcalc -n -b $INSIDE_IP $INSIDE_NETMASK`
	INSIDE_NETWORK=$NETWORK
	unset NETWORK
	INSIDE_BROADCAST=$BROADCAST
	unset BROADCAST
}


cat >> /etc/inside.info <<-EOF
	INSIDE_NETWORK=$INSIDE_NETWORK
	INSIDE_BROADCAST=$INSIDE_BROADCAST
EOF

# Resetting.
NETWORK=
BROADCAST=

#
# Setting up the inside:
#
configure_interface $INSIDE_DEV $INSIDE_IP \
	netmask $INSIDE_NETMASK broadcast $INSIDE_BROADCAST

#
# setting up /etc/hosts
#
echo ""
echo "$INSIDE_IP $HOSTNAME.$DOMAIN  $HOSTNAME" >> /etc/hosts
# setting up hostname
hostname $HOSTNAME
hostname -d $DOMAIN
echo "Hostname (fully qualified) set up to `hostname -f`"

#
# Tip from Jacco Kok. Setting the MAC address to fool 
# some bootp/dhcp servers and arp.
#
if [ -n "$OUTSIDE_MAC" ]
then
	echo "Faking MAC address."
	ifconfig $OUTSIDE_DEV hw ether $OUTSIDE_MAC
fi

#
# DMZ Setup. This has to be done before the OUTSIDE stuff since
# it's the outside stuff that starts firewall.ini
#
if bool_value $USE_DMZ
then
	#
	# Let's make things easier for the users and find this automagically.
	#
	echo "Setting up DMZ network."
	eval `ipcalc -n -b $DMZ_IP $DMZ_NETMASK`
	DMZ_NETWORK=$NETWORK
	DMZ_BROADCAST=$BROADCAST

	configure_interface $DMZ_DEV $DMZ_IP netmask $DMZ_NETMASK broadcast $DMZ_BROADCAST

	cat > /etc/dmz.info <<EOF
DMZ_DEVICE=$DMZ_DEV
DMZ_IP=$DMZ_IP
DMZ_NETMASK=$DMZ_NETMASK
DMZ_NETWORK=$DMZ_NETWORK
DMZ_BROADCAST=$DMZ_BROADCAST
EOF

fi

#
# Outside settings basec on the CONNECT_TYPE:
#

#
# All connection methods other than the default (STATIC) has to:
#
# - Create /etc/outside.info
# - Create /etc/resolf.conf
# - Set default gateway
# - Boot /etc/firewall.ini


case "$CONNECT_TYPE" in
	#
	# Both PPP and PPPoE is taken care of by the ppp-up.ini script.
	#
	PPP|PPPoE|PPPOE)
		echo "Connetion method is PPP(oE), "
		echo -n "loading PPP modules."
		echo -n "slhc "
		modprobe slhc
		echo -n "ppp_generic "
		modprobe ppp_generic
		echo "ppp_async "
		modprobe ppp_async
		/etc/ppp/ppp-up
		#
		# Uwe Dippel wanted this one here.
		#
		if [ $DEMAND != 'no' ]; then
			echo "Demand dialing enabled, running firewall.init"
			/etc/firewall.init
		fi
		;;
	DHCP)
		echo "Connetion method is DHCP"
		echo "OUTSIDE_DEVICE=$OUTSIDE_DEV" > /etc/outside.info
		HARGS=
		[ "$USER_IDENT" != "" ] && HARGS="-H $USER_IDENT"
		if /sbin/udhcpc -n -s /etc/udhcpcrenew.sh $HARGS -i $OUTSIDE_DEV
		then
			. /etc/outside.info
		else
			echo "duh!"	# Or some more useful error handling
		fi
		;;
	EXTERNAL)
		echo "Connetion method is an External script (/etc/ext-up.ini)"
		/etc/ext-up.init
		;;
	*)  # STATIC and the rest.
		echo "Connetion method is the default (STATIC)."

		#
		# Let's make things easier for the users and
		# find this automagically.
		#
		eval `ipcalc -n -b $OUTSIDE_IP $OUTSIDE_NETMASK`
		OUTSIDE_NETWORK=$NETWORK
		OUTSIDE_BROADCAST=$BROADCAST

		configure_interface $OUTSIDE_DEV $OUTSIDE_IP \
			netmask $OUTSIDE_NETMASK broadcast $OUTSIDE_BROADCAST
		route add default gw $DEFAULT_GATEWAY metric 1

		echo "Setting up name server (etc/resolv.conf) "

		echo "search $DOMAIN" >> /etc/resolv.conf

		NAME_SERVER=`echo $OUTSIDE_NAMESERVERS | sed 's/,/ /g'`
		for i in $NAME_SERVER
		do
			echo "nameserver $i" >> /etc/resolv.conf
		done
		unset i

		cat > /etc/outside.info <<EOF
OUTSIDE_DEVICE=$OUTSIDE_DEV
OUTSIDE_IP=$OUTSIDE_IP
OUTSIDE_NETMASK=$OUTSIDE_NETMASK
OUTSIDE_NETWORK=$OUTSIDE_NETWORK
OUTSIDE_BROADCAST=$OUTSIDE_BROADCAST
OUTSIDE_GATEWAY=$DEFAULT_GATEWAY
EOF
		# Resetting.
		NETWORK=
		BROADCAST=

		echo "Setting up firewall rules: "
		/etc/firewall.init
		echo
		;;
esac


#
# No umask so it ends up with 600 with both dhcp and PPPoE
# I'm lazy and place it here.
chmod 644 /etc/resolv.conf

#
# DHCP Daemon and DNS Cache.
#
p=`pidof dnsmasq`

DNSMASQ_OPTS="-i $INSIDE_DEV"

if bool_value "$USE_DMZ"
then
  DNSMASQ_OPTS="$DNSMASQ_OPTS -i $DMZ_DEV"
fi

if bool_value "$DHCP_SERVER"
then
	/etc/udhcpd.conf.sh
	/sbin/udhcpd /etc/udhcpd.conf
	pidof dnsmasq > /dev/null || /sbin/dnsmasq $DNSMASQ_OPTS 
else
	if bool_value "$DNSMASQ"
	then
		pidof dnsmasq > /dev/null || /sbin/dnsmasq $DNSMASQ_OPTS 
	fi
fi

if bool_value "$DMZ_DHCP_SERVER"
then
	/etc/dmz-udhcpd.conf.sh
	/sbin/udhcpd /etc/dmz-udhcpd.conf
	pidof dnsmasq > /dev/null || /sbin/dnsmasq $DNSMASQ_OPTS 
fi
}}}

///%1
//%/

///%2
//%/
''參考文章''
1. 雲端運算服務資訊安全 (寫的很好)
http://mic.iii.org.tw/aisp/reports/reportdetail_register.asp?docid=2825&rtype=freereport
2. Trusted Computing Group
http://en.wikipedia.org/wiki/Trusted_Computing_Group
3. Trusted Computing: Promise and Risk
https://www.eff.org/wp/trusted-computing-promise-and-risk

{{item1{雲端安全}}}
{{op1{1. *雲端運算* 加強雲端基礎架構的邊界防禦力}}}
本文網址 : http://www.facebook.com/note.php?note_id=381830056993

雖然,企業通常都有嚴格的邊界安全措施,例如:防火牆、網路入侵偵測系統(IDS) 以及入侵預防系統 (IPS),但偶爾還是會有惡意程式潛入端點裝置。

最近 Zeus、Aurora/HYDRAQ 以及 Mariposa 等殭屍網路/傀儡網路 Botnet所發動的一些攻擊事件,都是透過入侵端點來達成。前陣子發生在加拿大卡加立(Calgary) 市一家醫療診所的入侵事件就是最好的證明,該診所在事件之後被迫必須通知 4,700 名病患。

專門鎖定特定目標的魚叉式網路釣魚 Phishing攻擊一旦入侵成功,就可以再入侵IT 管理員的端點,然後使用該端點上的系統管理帳號進入伺服器或雲端基礎架構,進而竊取資料。

''駭客只需要一張偷來的信用卡就能進入IaaS 業者的防線''

當然,基礎架構服務(IaaS) 雲端供應商也會設置邊界安全措施來保護客戶的伺服器。IaaS 業者通常是建置防火牆來保護客戶,不過,駭客卻只需要一張信用卡就能進入IaaS 業者的防線之內。歹徒可利用一張偷來的信用卡,向IaaS 業者租用一段時間的基礎架構服務,就能存取該業者的基礎架構,甚至讓他們的雲端伺服器和您的虛擬伺服器配置在同一部實體伺服器上。

''面對這樣的威脅,企業如何防禦?''

對於一般企業基礎架構的保護,這意謂著採用傳統的「縱深防禦」措施,也就是在動態虛擬化環境中的每一台主機都必須受到保護。但在IaaS 的情況下,企業要先了解自己須肩負保護自己伺服器的責任,然後在現有的雲端安全措施之外,添加一些自己的主機式安全防護,包括:防火牆、漏洞防禦 (IDS/IPS)、系統檔案一致性監控以及記錄檔檢查。

如果您是一家企業,並且擔心自己是否感染了殭屍網路/傀儡網路 Botnet程式,建議您利用一些閘道端評估工具來看看您是否已經遭到入侵。

{{op1{2. 雲端身份認證}}}
OAuth 網址 : http://oauth.net/

''OAuth'' provides a method for clients to access server resources on behalf of a resource owner (such as a different client or an end-user).  It also provides a process for end-users to authorize third-party access to their server resources without sharing their credentials (typically, a username and password pair), using user-agent redirections.

{{item1{論文 : A Security Management Architecture for the Protection of Kernel Virtual Machines}}}
網址 : http://www.computer.org/portal/web/csdl/doi/10.1109/CIT.2010.175

Virtualization is being pervasively adopted in a variety of scenarios ranging from regular desktop PCs to server farms and clusters. Indeed, the security of guest virtual machines and of the applications and services they host can be improved by leveraging the additional architectural layer introduced by such a technology. This paper discusses security management for virtualized environments and provides several contributions. First, a novel architecture (Kvm-SMA) with the following features is detailed: it can protect guest integrity from both remote and local attacks such as root-kits, viruses, and worms; it is not circumventable and it is completely transparent to guest machines; it can asynchronously analyze guest data and monitor guest system behavior. Second, the proposed architecture has been implemented entirely on open source software and can be replicated to both Linux and Windows guests. Third the effectiveness and efficiency of the proposed architecture is shown. The former is proved showing the results of root-kit detection test, while the latter is supported by standard performance tests showing that the introduced overhead is small. Finally, a distinguishing feature of our monitoring system proposal is that it is immune to timing attacks: that is, an adversary cannot notice the monitoring system is active by analyzing the time required to perform system calls. We believe that security management of both single virtualized hosts and distributed virtualized systems can benefit from our proposal

{{item1{Trusted Platform Module (TPM)}}}
In computing, Trusted Platform Module (TPM) is both the name of a published specification detailing a secure cryptoprocessor that can store cryptographic keys that protect information, as well as the general name of implementations of that specification, often called the "TPM chip" or "TPM Security Device" (as designated in certain Dell BIOS settings[1]). The TPM specification is the work of the Trusted Computing Group. The current version of the TPM specification is 1.2 Revision 116, published on March 3, 2011.[2] This specification is also available as the international standard ISO/IEC 11889.

[img[img/TPM_Asus.jpg]]
<<toBalaNotes "1">>

{{item1{雲端服務資訊安全評估}}}
近年來雲端服務的議題發酵,吸引許多企業投入該領域,且各國政府亦將雲端服務發展列入國家重大資訊政策。但若深入探討企業採用雲端服務的狀況,可發現目前僅有少數企業已採用雲端服務,且多數企業未來亦無強烈意願採用雲端服務,整體雲端服務市場仍處於觀望階段。

究其原因,除了雲端服務商業模式尚未成熟,可供企業或一般消費者使用的雲端運算服務數量仍偏低外,企業採用雲端服務時,須將公司資料放置於外部伺服器,且在多租用戶的架構下,各企業用戶的資料可能被共同放置於一部伺服器上,關於資料的安全性考量以及雲端服務供應商是否能夠維持穩定服務水準的疑慮使得企業採用雲端服務前須多加考慮,因此,雲端服務的資訊安全成為企業是否採用雲端運算服務的決定性因素。

''雲端服務資訊安全評估步驟''

目前許多雲端服務供應商聲明已具備足夠的雲端資訊安全能力,且市面上亦有許多雲端資訊安全解決方案可以採用,然而企業採用雲端服務所面臨的資訊安全問題,會隨著企業採用的種類以及使用方式而有所不同。因此雲端服務供應商所提供的資訊安全等級是否能夠符合企業的需求仍然需要經過企業自行評估才能確定,以下提出企業採用雲端服務時對於資訊安全的評估步驟:

''選定採用雲端服務的種類''

企業採用雲端服務之前,需先分辨清楚採用雲端服務的種類。雲端服務可分為基礎設施層(IaaS)、平台層(PaaS)和應用層(SaaS)三種層次,與公有雲、私有雲以及混合雲三種服務方式,採用服務種類的不同,企業面臨的雲端資訊安全風險也會有所不同。例如:公有雲具有多租用戶的架構,除了來自網路的威脅外,還須面臨其他雲端租戶與雲端服務供應商員工的內部威脅,同時還須考量資料實際的存放地點是否符合法律規範,而私有雲則較無以上風險;混合雲則因同時使用公有雲及私有雲架構,在資訊安全的評估上將更加複雜。

''確認相關資料與應用程式的重要性''

企業選定欲使用的雲端服務種類後,接下來則是確認該雲端服務使用到相關資料與應用程式的重要性。該雲端服務相關資料與應用程式的重要性不高,那麼標準的資訊安全等級即可滿足企業對於該雲端服務的要求;倘若該雲端服務相關資料與應用程式具有高度重要性,例如:客戶的個人資料,為了避免在2010年4月通過的新版個資法下須負擔的高額民事賠償金額以及刑事責任外,企業要求的資訊安全等級勢必會提高。

企業應對相關資料與應用程式後續可能產生的風險以及企業是否能夠承受該風險所帶來的衝擊進行衡量,如果相關資料與應用程式的重要性甚高,企業對於可能造成的衝擊無法承擔時,則必須重新考量採用的雲端服務種類是否恰當。

''進行整體風險分析''

確認相關資料與應用程式的重要性所帶來的風險與衝擊後,企業應對採用的雲端服務進行整體性的風險評估。一般而言,雲端服務資訊安全主要的考量因素有以下幾項:資料的安全性(如:使用者存取管理、資料加密管理、資料傳輸管理、各用戶資料彼此間的隔離、資料實際儲存位置)、雲端運算服務供應商法規遵循的可靠性(如:營運方式是否符合法規要求)、服務中斷時的回復能力(如:回復速度、回復程度)、對企業需求的支援能力(如:協助企業進行調查、雲端運算服務提供商之間的轉換)、永續經營(如:是否會突然終止服務、終止服務後協助原用戶移轉服務的能力)。

根據上述項目,企業可搭配資訊安全等級的要求以及所處產業的相關法規,針對各項目進行延伸或修改以制定出符合該企業雲端資訊安全需求的風險評核表。而企業在制定風險評核表時必須清楚地了解採用雲端服務後工作流程的改變,發掘各環節可能發生的風險,以提高風險評核表的完整性與周延性。

''尋找合適業者''

當企業量身打造出符合需求的風險評核表後,企業即可依此對各雲端運算服務提供商進行篩選。首先,企業可以參考雲端服務供應商是否具備相關認證以證明其能力,例如:由雲端安全聯盟(Cloud Security Alliance)所提供的CCSK(Certificate of Cloud Security Knowledge)認證。針對風險評核表中各項考量因素,雲端運算服務供應商均應提供完善且直接對應的管理能力以及事後補救方案。當企業挑選出合適的雲端運算服務提供商後,企業需與其簽訂服務水準協議(SLA,Service Level Agreement),其中除明訂雲端運算服務供應商各項服務應提供的水準(如:連線速度、斷線頻率、以及違反時應給付予企業的賠償)外,對於各項疏失所造成的責任亦應清楚分擔。

''定期追蹤考核''

與選定的雲端服務提供商簽訂契約後,企業便可依照此契約使用該業者提供的雲端服務,但對於雲端服務資訊安全的評估仍應持續進行。由於會因為資訊技術的改善、法規條文的頒布、企業業務範圍的調整等內在或外在因素的改變,企業對於採用的雲端服務資訊安全的要求也會有所改變,因此,企業應定期檢視並適時修改風險評核表,以確保該雲端服務的資訊安全等級隨時保持在企業要求水準之上。
結論

雲端服務改變了以往的商業模式,採用雲端服務可使企業以更符合成本效益的方式經營,節省資訊軟、硬體及維護費用,企業毋須管理繁雜的IT設施,可以更專注於核心價值的經營與創造,對於資源有限且資訊人力不足的中小企業而言,效果更為明顯。

雖然採用雲端運算服務對於企業有諸多好處,但根據市場上採用的情況來看,雲端運算服務仍然處於市場的萌芽期。目前採用的企業多為大型企業或是跨國集團,由於大規模企業對運算能力有較高的需求,雖市場上有業者以雲端運算方式提供服務,但大規模企業傾向由雲端業者協助輔導建立企業內部私有雲。大規模企業對於資訊系統投資建置的資源較多,因此具有足夠的能力可以建置私有雲,但不可否認的,私有雲具備較高水準的資訊安全也是企業採用私有雲相當重要的考量因素。

以上提出雲端服務資訊安全的評估流程,從選定採用雲端服務的種類、確認相關資料與應用程式的重要性、進行整體風險分析、建立風險評核表、尋找合適業者到最後地定期追蹤考核,可以協助企業使用者對於雲端服務資訊安全的評估有更清楚的認識。但這些流程步驟當中相當重要的一點是雲端業者提供的資訊透明度,雲端業者可以提供越透明越即時的資訊,企業使用者則可以隨時掌握現在採用雲端服務的服務水準以及是否有安全上的疑慮。目前全球領先的SaaS雲端業者 Salesforce.com,提供全球企業CRM雲端軟體服務,架構了提供企業使用者資訊的網站trust.salesforce.com,企業使用者可以從網站上看到即時的服務水準資訊,可以增加企業使用者對於資訊安全評估的準確性以及對雲端業者的信賴程度。

雲端服務背後的資訊架構以及流程相較於傳統資訊架構複雜許多,企業須面對的資訊安全問題也相對較多。因此,企業在採用雲端服務前,需按步驟對整體雲端服務採用做全面性的風險評估,了解各環節潛藏的資訊安全風險,以便做事前預防。

同時,對於潛在風險可能造成的衝擊,企業亦需與雲端服務提供商明確地分攤責任並共同制定事後的補救方案以降低帶來的傷害,如此企業才能安全無慮地享受雲端服務所帶來的效益。

<<toBalaNotes "2">>

///%1
//%/

///%2
//%/
BlackBerry 官網 : http://na.blackberry.com

{{item1{BlackBerry OS}}}
BlackBerry OS is a proprietary mobile operating system, developed by Research In Motion for its BlackBerry line of smartphone handheld devices. The operating system provides multitasking and supports specialized input devices that have been adopted by RIM for use in its handhelds, particularly the trackwheel, trackball, and most recently, the trackpad and touchscreen.

The BlackBerry platform is perhaps best known for its native support for corporate email, through MIDP 1.0 and, more recently, a subset of MIDP 2.0, which allows complete wireless activation and synchronization with Microsoft Exchange, Lotus Domino, or Novell GroupWise email, calendar, tasks, notes, and contacts, when used in conjunction with BlackBerry Enterprise Server. The operating system also supports WAP 1.2.

Updates to the operating system may be automatically available from wireless carriers that support the BlackBerry OTASL (over the air software loading) service.

Third-party developers can write software using the available BlackBerry API (application programming interface) classes, although applications that make use of certain functionality must be digitally signed.

''參考文章''
1. HDFS File System Shell Guide
http://hadoop.apache.org/common/docs/r0.20.0/hdfs_shell.html
2. The Hadoop Distributed File System
http://developer.yahoo.com/hadoop/tutorial/module2.html
3. Mount hdfs on Linux Ubuntu 10.04 Server x86-64 
http://clhjoe.blogspot.com/2010/11/mount-hdfs-on-linux-ubuntu-1004-server.html

{{item1{HDFS 命令操作}}}

''1. 建立目錄''
{{{
$ cd /mnt/hda1/hadoop-1.0.3
$ start-dfs.sh

$ hadoop dfs -ls  /    
Found 1 items
drwxr-xr-x   - root supergroup          0 2013-04-13 14:02 /mnt

$ hadoop dfs -mkdir  /user

$ hadoop dfs -ls  /   
Found 2 items
drwxr-xr-x   - root supergroup          0 2012-06-09 22:36 /mnt
drwxr-xr-x   - root supergroup          0 2012-06-09 22:40 /user
}}}

''[註]'' 如 Name Node 還沒準備好, 這時會出現以下錯誤訊息 :
mkdir: org.apache.hadoop.hdfs.server.namenode.SafeModeException: Cannot create directory /user. Name node is in safe mode.

''2. 上載檔案''
{{{
$ hadoop dfs -copyFromLocal *.txt /user

$ hadoop dfs -ls /user
Found 4 items
-rw-r--r--   1 student supergroup          0 2011-06-12 00:32 /user/CHANGES.txt
-rw-r--r--   1 student supergroup          0 2011-06-12 00:32 /user/LICENSE.txt
-rw-r--r--   1 student supergroup          0 2011-06-12 00:32 /user/NOTICE.txt
-rw-r--r--   1 student supergroup          0 2011-06-12 00:32 /user/README.txt
}}}

''3. 顯示檔案內容''
{{{
# hadoop dfs -cat /user/NOTICE.txt
This product includes software developed by The Apache Software
Foundation (http://www.apache.org/).
}}}

''4. 檢視檔案儲存資訊''
{{{
# hadoop fsck /user/NOTICE.txt -files -blocks -locations
FSCK started by root from /192.168.100.20 for path /user/NOTICE.txt at Thu Nov 03 17:26:50 UTC 2011
/user/NOTICE.txt 101 bytes, 1 block(s):  OK
0. blk_-6938191868505178697_1006 len=101 repl=1 [192.168.100.20:50010]

Status: HEALTHY
 Total size:	101 B
 Total dirs:	0
 Total files:	1
 Total blocks (validated):	1 (avg. block size 101 B)
 Minimally replicated blocks:	1 (100.0 %)
 Over-replicated blocks:	0 (0.0 %)
 Under-replicated blocks:	0 (0.0 %)
 Mis-replicated blocks:		0 (0.0 %)
 Default replication factor:	1
 Average block replication:	1.0
 Corrupt blocks:		0
 Missing replicas:		0 (0.0 %)
 Number of data-nodes:		1
 Number of racks:		1
FSCK ended at Thu Nov 03 17:26:50 UTC 2011 in 5 milliseconds

The filesystem under path '/user/NOTICE.txt' is HEALTHY
}}}

''5. 取回檔案''
The get command is the inverse operation of put; it will copy a file or directory (recursively) from HDFS into the target of your choosing on the local file system. A synonymous operation is called -copyToLocal.
{{{
# hadoop dfs -get /user/README.txt a.txt
# ll a.txt
-rw-r--r-- 1 root root 1366 2011-11-05 21:40 a.txt
}}}

''6. 檢視儲存目錄''
{{{
# tree -h data
data
└── [4.0K]  dfs
    ├── [4.0K]  data
    │ ├── [4.0K]  current
    │ │ ├── [ 101]  blk_-4122798314015182175
    │ │ ├── [  11]  blk_-4122798314015182175_1003.meta
    │ │ ├── [ 13K]  blk_-5269488340386322561
    │ │ ├── [ 115]  blk_-5269488340386322561_1002.meta
    │ │ ├── [1.3K]  blk_6763453011940087088
    │ │ ├── [  19]  blk_6763453011940087088_1004.meta
    │ │ ├── [403K]  blk_9063958834424841508
    │ │ ├── [3.2K]  blk_9063958834424841508_1001.meta
    │ │ ├── [ 290]  dncp_block_verification.log.curr
    │ │ └── [ 159]  VERSION
    │ ├── [4.0K]  detach
    │ ├── [   0]  in_use.lock
    │ ├── [ 157]  storage
    │ └── [4.0K]  tmp
    ├── [4.0K]  name
    │ ├── [4.0K]  current
    │ │ ├── [   4]  edits
    │ │ ├── [ 543]  fsimage
    │ │ ├── [   8]  fstime
    │ │ └── [ 101]  VERSION
    │ ├── [4.0K]  image
    │ │ └── [ 157]  fsimage
    │ ├── [   0]  in_use.lock
    │ └── [4.0K]  previous.checkpoint
    │     ├── [   4]  edits
    │     ├── [ 110]  fsimage
    │     ├── [   8]  fstime
    │     └── [ 101]  VERSION
    └── [4.0K]  namesecondary
        ├── [4.0K]  current
        │ ├── [   4]  edits
        │ ├── [ 543]  fsimage
        │ ├── [   8]  fstime
        │ └── [ 101]  VERSION
        ├── [4.0K]  image
        │ └── [ 157]  fsimage
        └── [   0]  in_use.lock

12 directories, 28 files
}}}

''7. 刪除檔案''
{{{
$ hadoop dfs -rm /user/*.txt
Deleted hdfs://HDP120:9000/user/CHANGES.txt
Deleted hdfs://HDP120:9000/user/LICENSE.txt
Deleted hdfs://HDP120:9000/user/NOTICE.txt
Deleted hdfs://HDP120:9000/user/README.txt
}}}

''8. 停止 HDFS''
{{{
# stop-dfs.sh
}}}

<<toBalaNotes "1">>

{{item1{HDFS 管理命令}}}

''1. 啟動 HDFS 系統資訊''
{{{
# start-dfs.sh     
starting namenode, logging to /mnt/hda1/hadoop-0.20.203.0/bin/../logs/hadoop-root-namenode-HDP120.out
HDP120: starting datanode, logging to /mnt/hda1/hadoop-0.20.203.0/bin/../logs/hadoop-root-datanode-HDP120.out
HDP120: starting secondarynamenode, logging to /mnt/hda1/hadoop-0.20.203.0/bin/../logs/hadoop-root-secondarynamenode-HDP120.out
}}}

''2. 第一次顯示 HDFS 系統資訊''
{{{
# hadoop dfsadmin -report   
Safe mode is ON
Configured Capacity: 0 (0 KB)
Present Capacity: 0 (0 KB)
DFS Remaining: 0 (0 KB)
DFS Used: 0 (0 KB)
DFS Used%: ?%
Under replicated blocks: 0
Blocks with corrupt replicas: 0
Missing blocks: 0

-------------------------------------------------
Datanodes available: 0 (0 total, 0 dead)
}}}

''[註]'' HDFS 如剛啟動會顯示 Safe mode is ON, 強迫離開 Safe Mode 的命令為 "hadoop dfsadmin -safemode leave"

''3. 再次顯示 HDFS 系統資訊''
{{{
# hadoop dfsadmin -report
Configured Capacity: 4226125824 (3.94 GB)
Present Capacity: 3697659904 (3.44 GB)
DFS Remaining: 3697623040 (3.44 GB)
DFS Used: 36864 (36 KB)
DFS Used%: 0%
Under replicated blocks: 0
Blocks with corrupt replicas: 0
Missing blocks: 0

-------------------------------------------------
Datanodes available: 1 (1 total, 0 dead)

Name: 192.168.100.20:50010
Decommission Status : Normal
Configured Capacity: 4226125824 (3.94 GB)
DFS Used: 36864 (36 KB)
Non DFS Used: 528465920 (503.98 MB)
DFS Remaining: 3697623040(3.44 GB)
DFS Used%: 0%
DFS Remaining%: 87.49%
Last contact: Sat Nov 05 10:40:56 UTC 2011
}}}

<<toBalaNotes "2">>

///%1
//%/

///%2
//%/
{{item1{Hadoop 1.x 資料作業系統}}}
{{op1{1. 啟動 Hadoop 1.x}}}
''student@UBDOS1:~$ cd kvmhdfs1.0/
student@UBDOS1:~/kvmhdfs1.0$ sudo ./hdfs.sh''
{{{
建構 SH100 網路
--------------------------
SH100 建立成功
  SH100-NET 建立成功
  SH100P1 建立成功
  SH100P2 建立成功
  SH100P3 建立成功
  SH100P4 建立成功
  SH100R1 建立成功
  SH100R2 建立成功

啟動 HDFS 系統
--------------------------
NN(768 M) 啟動成功
SN(512 M) 啟動成功
DN01(512 M) 啟動成功
DN02(512 M) 啟動成功
HDFS:mapreduce> 
}}}
{{op1{2. 編輯 /etc/hosts}}}
''HDFS:mapreduce> sudo nano /etc/hosts''
{{{
127.0.0.1       localhost
127.0.1.1       UBDOS1
172.16.100.10 NN
172.16.100.11 SN
172.16.100.12 DN01
172.16.100.13 DN02
# The following lines are desirable for IPv6 capable hosts
::1 ip6-localhost ip6-loopback
fe00::0 ip6-localnet ff00::0 ip6-mcastprefix ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
}}}
{{op1{3. 檢視 Hadoop 1.x 系統}}}
''HDFS:mapreduce> hadoop dfsadmin -report''
{{{
Configured Capacity: 4227252224 (3.94 GB)
Present Capacity: 3306016768 (3.08 GB)
DFS Remaining: 3305934848 (3.08 GB)
DFS Used: 81920 (80 KB)
DFS Used%: 0%
Under replicated blocks: 0
Blocks with corrupt replicas: 0
Missing blocks: 0
-------------------------------------------------
Datanodes available: 2 (2 total, 0 dead)

Name: 172.16.100.12:50010
Decommission Status : Normal
Configured Capacity: 2113626112 (1.97 GB)
DFS Used: 40960 (40 KB)
Non DFS Used: 460738560 (439.39 MB)
DFS Remaining: 1652846592(1.54 GB)
DFS Used%: 0%
DFS Remaining%: 78.2%
Last contact: Sat Jul 26 10:21:39 CST 2014

Name: 172.16.100.13:50010
Decommission Status : Normal
Configured Capacity: 2113626112 (1.97 GB)
DFS Used: 40960 (40 KB)
Non DFS Used: 460496896 (439.16 MB)
DFS Remaining: 1653088256(1.54 GB)
DFS Used%: 0%
DFS Remaining%: 78.21%
Last contact: Sat Jul 26 10:21:38 CST 2014
}}}
{{op1{4. 啟動 NAT 主機}}}
啟動一個新的終端機, 然後執行以下命令 :
''student@UBDOS1:~$ cd kvmhdfs1.0/
student@UBDOS1:~/kvmhdfs1.0$ sudo ./kvmrouter.sh RBR0.100''
{{{
NAT100(128 M) 啟動成功
}}}
{{op1{5. 測通外網}}}
在 dn01 主機執行以下命令 :
''root@NN:/mnt/sda1/hadoop-1.0.4# ping www.hinet.net''
{{{
PING www.hinet.net (202.39.253.11): 56 data bytes                               
64 bytes from 202.39.253.11: seq=0 ttl=127 time=43.744 ms                       
64 bytes from 202.39.253.11: seq=1 ttl=127 time=69.929 ms                       
64 bytes from 202.39.253.11: seq=2 ttl=127 time=54.467 ms                       
64 bytes from 202.39.253.11: seq=3 ttl=127 time=101.870 ms                      
                                                                                     
--- www.hinet.net ping statistics ---                                           
4 packets transmitted, 4 packets received, 0% packet loss                       
round-trip min/avg/max = 43.744/67.502/101.870 ms 
}}}
<<toBalaNotes "1">>


///%1
//%/
''參考文章''
1. Introduction to AppArmor
http://ubuntuforums.org/showthread.php?t=1008906
2. QEMU/Networking (很完整)
http://en.wikibooks.org/wiki/QEMU/Networking
3. QEMU Documentation/Networking
http://wiki.qemu.org/Documentation/Networking
4. Ubuntu 12.04 LTS Precise Pangolin: Networking tips and tricks (很重要)
http://www.sitepoint.com/ubuntu-12-04-lts-precise-pangolin-networking-tips-and-tricks/
5. QEMU Emulator User Documentation (重要文件)
http://wiki.qemu.org/download/qemu-doc.html
6. 20 Linux System Monitoring Tools Every SysAdmin Should Know
http://www.cyberciti.biz/tips/top-linux-monitoring-tools.html

{{item1{QEMU 網路設定}}}
使用 kvm 命令設定虛擬電腦的單一網卡, 需由以下二個參數完成
{{{
-net nic       -net user 
    |              | 
第一個參數       第二個參數
}}}

''第一個參數 (網卡設定)'' 
主要設定虛擬電腦的網卡, 內定網卡型號為 e1000, 也可透過此參數設定網卡 MAC 號碼

‘-net nic[,vlan=n][,macaddr=mac][,model=type][,name=name][,addr=addr][,vectors=v]’
{{{
Create a new Network Interface Card and connect it to VLAN n (n = 0 is the default). The NIC is an e1000 by default on the PC target. Optionally, the MAC address can be changed to mac, the device address set to addr (PCI cards only), and a name can be assigned for use in monitor commands. Optionally, for PCI cards, you can specify the number v of MSI-X vectors that the card should have; this option currently only affects virtio cards; set v = 0 to disable MSI-X. If no ‘-net’ option is specified, a single NIC is created. Qemu can emulate several different models of network card. Valid values for type are virtio, i82551, i82557b, i82559er, ne2k_pci, ne2k_isa, pcnet, rtl8139, e1000, smc91c111, lance and mcf_fec. Not all devices are supported on all targets. Use -net nic,model=? for a list of available devices for your target.
}}}

''第二個參數 (後端運作 : Backend)''
主要設定後端網路的運作模式. 可使用的運作模式如下 :

‘-net user[,option][,option][,...]’
‘-net tap[,vlan=n][,name=name][,fd=h][,ifname=name][,script=file][,downscript=dfile]’
‘-net socket[,vlan=n][,name=name][,fd=h][,listen=[host]:port][,connect=host:port]’
‘-net vde[,vlan=n][,name=name][,sock=socketpath][,port=n][,group=groupname][,mode=octalmode]’
‘-net none’

''[註]'' Legacy stand-alone options ''-tftp, -bootp, -smb and -redir'' are still processed and applied to ''-net user''. Mixing them with the new configuration syntax gives undefined results. Their use for new applications is discouraged as they will be removed from future versions. 
<<toBalaNotes "1">>
{{item1{User 後端運作模式 (內定)}}}

''1. 啟動虛擬電腦''
{{{
$ kvm -name tsc32 -m 512  -kernel kernel/vmlinuz -initrd kernel/core.gz -net nic -net user
}}}
上面命令, 所啟動的虛擬電腦, 其 IP 及 MAC 內定為 10.0.2.15/52:54:00:12:34:56, 並內定關閉 ping 功能

''2. 撰寫 startvm.sh 程式''
{{{
#!/bin/bash
# Boot the requested TinyCore guest (tsc32 or tsc64) with QEMU/KVM using
# the default user-mode NIC; unknown names are silently ignored.

if [ "$#" != "1" ]; then
   echo "startvm.sh name"
   exit 1
fi

if [ "$1" = "tsc32" ]; then
   kvm -name tsc32 -m 128 -kernel kernel/vmlinuz -initrd kernel/core.gz -net nic -net user
elif [ "$1" = "tsc64" ]; then
   kvm -name tsc64 -m 128 -kernel kernel/vmlinuz64 -initrd kernel/core64.gz -net nic -net user
fi
exit 0
}}}

''3. 執行 startvm.sh''
{{{
$ ./startvm.sh tsc32 &
}}}

''以文字模式啟動虛擬電腦''
{{{
-nographic : Normally, QEMU uses SDL to display the VGA output. With this option, you can totally disable graphical output so that QEMU is a simple command line application. The emulated serial port is redirected on the console. Therefore, you can still use QEMU to debug a Linux kernel with a serial console.

-curses : Normally, QEMU uses SDL to display the VGA output. With this option, QEMU can display the VGA output when in text mode using a curses/ncurses interface. Nothing is displayed in graphical mode.
}}}
<<toBalaNotes "2">>
{{item1{User 後端運作模式 (自訂 Network ID)}}}
''1. 啟動虛擬電腦''
{{{
$ kvm -name tsc32 -m 128 -kernel kernel/vmlinuz -initrd kernel/core.gz -net nic -net user,net=192.168.0.0/24,host=192.168.0.5,restrict=no
}}}

''命令參數說明''
"net=192.168.0.0/24" : 指定虛擬網路使用 192.168.0.0/24 這個 Network ID (Class C), 虛擬電腦的網卡會被指定 192.168.0.15
"host=192.168.0.5" : 指定 Host 的 IP 位址為 192.168.0.5 
"restrict=no" : 虛擬電腦可以 ping 到 Host IP

''[註]'' 虛擬電腦的 Default Gateway 與 Host IP 一樣, DNS IP 則是指定 Network ID 中的第三個 IP (192.168.0.3)

''@@color:blue;
問題 1 : 所啟動的虛擬電腦可否上網 ?  (telnet)


問題 2 : 可否指定所啟動的虛擬電腦的 IP 位址 ?  (dhcpstart="192.168.0.5")

@@''
<<toBalaNotes "3">>

{{item1{User + Socket 混搭後端運作模式}}}
此種運作模式是所有虛擬電腦, 連接在同一 vlan (hub) 網段中, 內網主機使用撥號網路 (SLIRP) 連接方式, 連接到 Gateway 主機. 內網主機需連接各自專屬的連接埠 (port), 這樣在執行 ping 命令時, 才不會有重覆封包回應.

''1. 啟動 Gateway 主機''
{{{
$ kvm -name "gateway" -m 128 -kernel kernel/vmlinuz -initrd kernel/core.gz -net nic,macaddr="52:54:00:12:34:5" -net user,net="192.168.0.0/24",restrict=no,host="192.168.0.1",dhcpstart="192.168.0.5" -net socket,listen=:"8012" -net socket,listen=:"8013"  
}}}

''2. 啟動第一部內網主機''
{{{
$ kvm -name "client1" -m 128 -kernel kernel/vmlinuz -initrd kernel/core.gz -net nic,macaddr="52:54:00:12:34:12" -net socket,connect=127.0.0.1:8012
}}}

''3. 啟動第二部內網主機''
{{{
$ kvm -name "client2" -m 128 -kernel kernel/vmlinuz -initrd kernel/core.gz -net nic,macaddr="52:54:00:12:34:13" -net socket,connect=127.0.0.1:8013
}}}

''SLIRP 說明''
{{{
Slirp (sometimes capitalized SLiRP) is a software program that emulates a PPP, SLIP, or CSLIP connection to the Internet via a shell account. It is largely obsolete for its original purpose, as dedicated dial-up PPP connections and broadband Internet access have in turn become widely available and inexpensive. However, it remains useful for connecting mobile devices, such as PDAs, via their serial ports.
}}}

{{item1{Multicast}}}
An extension of this concept is to connect vlans together using a multicast socket. For example, with:
{{{
 $> qemu -net nic -net socket,mcast=230.0.0.1:1234 ...
 $> qemu -net nic -net socket,mcast=230.0.0.1:1234 ...
}}}
you have two guests with their vlan 0 connected together over a multicast bus. Any number of guests can connect to the same multicast address and receive the frames sent by any guest to that vlan. 
<<toBalaNotes "4">>

{{item1{撰寫 startvm.sh 程式 - 支援 Socket 後端運作模式}}}

''$ cat startvm.sh''
{{{
#!/bin/bash
# startvm.sh mode mac -- launch one KVM guest of the user+socket topology.
#   mode : server32|server64 (SLIRP gateway VM) or client32|client64 (socket client VM)
#   mac  : last MAC octet; for clients it is also reused as the 80xx connect-port suffix

[ "$#" != "2" ] && echo "startvm.sh mode mac" && exit 1

k32="kernel/vmlinuz"
rd32="kernel/core.gz"
k64="kernel/vmlinuz64"
rd64="kernel/core64.gz"

# Gateway (server) network settings.
# guestip is the first address handed out by QEMU's built-in DHCP server;
# DNS is 192.168.0.3 and the default gateway is 192.168.0.1.
netid="192.168.0.0/24"
hostip="192.168.0.1"
guestip="192.168.0.5"
p1="8012"
p2="8013"
mac="52:54:00:12:34:$2"

name="$1@$mac"

case "$1" in
    "server32")
       # Refuse to start a second gateway instance if one is already running.
       p=$(ps aux)
       echo $p | grep "\-name server" &>/dev/null
       [ "$?" == "0" ] && echo -e "\nServer 已啟動\nPress any to continue " && exit 1 

       kvm -name "$name" -m 128 -kernel "$k32" -initrd "$rd32" -net nic,macaddr="$mac" -net user,net="$netid",restrict=no,host="$hostip",dhcpstart="$guestip" -net socket,listen=:"$p1" -net socket,listen=:"$p2"  
      ;;
    "server64")
       # Refuse to start a second gateway instance if one is already running.
       p=$(ps aux)
       echo $p | grep "\-name server" &>/dev/null
       [ "$?" == "0" ] && echo -e "\nServer 已啟動\nPress any to continue " && exit 1 
       kvm -name "$name" -m 128 -kernel "$k64" -initrd "$rd64" -net nic,macaddr="$mac" -net user,net="$netid",restrict=no,host="$hostip",dhcpstart="$guestip" -net socket,listen=:"$p1" -net socket,listen=:"$p2" 
       ;;
    "client32")
       kvm -name "$name" -m 128 -kernel "$k32" -initrd "$rd32" -net nic,macaddr="$mac" -net socket,connect=127.0.0.1:80"$2"
       ;;
    "client64")
       # BUGFIX: this branch previously booted the 32-bit kernel/initrd ($k32/$rd32).
       kvm -name "$name" -m 128 -kernel "$k64" -initrd "$rd64" -net nic,macaddr="$mac" -net socket,connect=127.0.0.1:80"$2"
       ;;
     *)
       echo "無法處理" 
       exit 1 
       ;;
esac
exit 0
}}}

''執行 startvm.sh''
{{{

# 啟動連外主機
$ ./startvm.sh server64 5 &

# 啟動第一部內網主機
$ ./startvm.sh client64 12 &

# 啟動第二部內網主機
$ ./startvm.sh client64 13 &
}}}
<<toBalaNotes "5">>
///%1
//%/

///%2
//%/

///%4
//%/

///%3
//%/

///%5
//%/



{{item1{自定監控資訊格式}}}
{{{
$ inotifywait -rmq --timefmt "%m_%d_%H_%M_%S" --format "%w %f %e %T" -e create /tmp/ABird &
[1] 1183
$ touch /tmp/ABird/z
/tmp/ABird/ z CREATE 06_18_15_01_03
}}}

''--format 參數說明''
%w : 監控檔案的完整目錄名稱
%f : 監控檔案的名稱
%e : 監控的事件名稱
%Xe : Replaced with the Event(s) which occurred, separated by whichever character is in the place of 'X'. 
%T : Replaced with the current Time in the format specified by the --timefmt option, which should be a format string suitable for passing to strftime(3).

{{item1{執行監控程式}}}

''1. 撰寫程式''
{{{
$ nano testInotify.sh
#!/bin/bash
# Watch a directory tree ($1) and print one line per newly created file.
# inotifywait -rmq monitors recursively, stays in monitor mode, and emits
# "<dir> <file> <event> <time>" per CREATE event; the while-read loop splits
# that into p (path), f (filename), a (action/event name), t (timestamp).
inotifywait -rmq --timefmt "%m_%d_%H_%M_%S" --format "%w %f %e %T" -e create $1 |
while read p f a t;
do 
   # Print the full path of the new file plus the event name and time.
   echo "$p$f" $a $t 
   echo ""
done

$ chmod +x testInotify.sh 
}}}

''2. 執行程式''
{{{
$ ./testInotify.sh /tmp/ABird/ &
[1] 1201
}}}

''3. 測試程式''
{{{
$ touch /tmp/ABird/z
$ touch /tmp/ABird/v
/tmp/ABird/v CREATE 06_18_15_37_40
}}}

''4. 結束程式執行''
{{{
# killall -9 inotifywait
[1]+  Done                    ./testInotify.sh  /tmp/ABird/
}}}

<<toBalaNotes "1">>

{{item1{上載新增檔案至 HDFS 系統}}}

''1. 撰寫程式''
{{{
$ nano new2HDFS.sh
#!/bin/bash
# Watch a directory tree ($1) and upload every newly created file to the
# /user directory of the HDFS cluster.

# Make the hadoop CLI available inside this non-interactive shell.
export JAVA_HOME=/usr/lib/jvm/java-6-sun
export HADOOP_HOME=/root/hadoop-0.20.203.0
export PATH=$PATH:$HADOOP_HOME/bin

# Stream CREATE events as "<dir> <file> <event> <time>" lines into the loop:
# p = path, f = filename, a = action/event name, t = timestamp.
inotifywait -rmq --timefmt "%m_%d_%H_%M_%S" --format "%w %f %e %T" -e create $1 | 
while read p f a t;
do 
   echo "$p$f" $a $t 
   # Push the newly created local file into HDFS under /user.
   hadoop dfs -put "$p$f" /user
   echo ""
done

$ chmod +x new2HDFS.sh
}}}

''2. 執行程式''
{{{
$  ./new2HDFS.sh  /tmp/ABird/  &
}}}

''3. 測試程式''
{{{
$  touch /tmp/ABird/xy
}}}

''4. 結束程式執行''
{{{
# killall -9 inotifywait
[1]+  Done                    ./new2HDFS.sh /tmp/ABird/
}}}

<<toBalaNotes "2">>

///%1
//%/

///%2
//%/
請在 [admins] 輸入 Username = Password 管理帳號, 輸入密碼後, CouchDB 重啟後會自動計算密碼

''1. 修改 /etc/couchdb/local.ini 設定檔''
{{{
$ sudo nano /etc/couchdb/local.ini

; CouchDB Configuration Settings
; Custom settings should be made in this file. They will override settings
; in default.ini, but unlike changes made to default.ini, this file won't be
; overwritten on server upgrade.

[couchdb]
;max_document_size = 4294967296 ; bytes

[httpd]
port = 5984
bind_address = 192.168.32.137

[log]
;level = debug

[couch_httpd_auth]
;secret = replace this with a real secret


[update_notification]
;unique notifier name=/full/path/to/exe -with "cmd line arg"

; To create an admin account uncomment the '[admins]' section below and add a
; line in the format 'username = password'. When you next start CouchDB, it
; will change the password to a hash (so that your passwords don't linger
; around in plain-text files). You can add more admin accounts with more
; 'username = password' lines. Don't forget to restart CouchDB after
; changing this.
[admins]
admin = 2Password!   (新增管理帳號)
}}}

''2. 重新啟動 CouchDb''
{{{
# /etc/init.d/couchdb restart
}}}

''3. 再次檢視 /etc/couchdb/local.ini 檔案內容''
{{{
                                           :
                                           :
; To create an admin account uncomment the '[admins]' section below and add a
; line in the format 'username = password'. When you next start CouchDB, it
; will change the password to a hash (so that your passwords don't linger
; around in plain-text files). You can add more admin accounts with more
; 'username = password' lines. Don't forget to restart CouchDB after
; changing this.
[admins]
tobala = -hashed-f2f1c72222d7fb0c1a98f7402e3a60d193f508a1,79c108c7e1415d4056a0b$   (新增管理帳號)
}}}

<<toBalaNotes "admin">>


///%admin
//%/
''參考文章''
1. How to create windows image(KVM) supports virtio 
http://www-01.ibm.com/support/docview.wss?uid=swg21587905
 
{{item1{Virtio 週邊裝置}}}
''Virtio block device'' is a ''para-virtualized device'' for kvm guest. It is different from normal emulated hard drive, because it is simply faster. This small how-to is about how to make a disk para-virtualized and boot from it. 

''[註 1]'' Linux kernel 從 2.6.25 版本開始已內建 virtio_* 驅動程式.
''[註 2]'' current debian lenny default kernel (2.6.26-2-amd64) will not give any output when running "dmesg | grep virtio" 

''Windows 虛擬電腦安裝硬碟 Virtio 驅動程式''

1. 建立一部 Windows 虛擬電腦
2. 下載 Redhat 的 Windows Virtio 週邊裝置驅動程式 
請到 http://alt.fedoraproject.org/pub/alt/virtio-win/ 這網址, 下載 virtio-win-0.x-xx.iso 這光碟檔

3. 替 Windows 虛擬電腦, 新增一個 virtio 的硬碟
此硬碟只是暫時使用, 所以只需給 512M, 如下圖 :

[img[img/winvirtio01.png]]

4. 將 virtio-win-0.x-xx.iso 這光碟檔, 指定給 Windows 虛擬電腦新增的光碟機

[img[img/winvirtio02.png]]

5. 啟動  Windows 虛擬電腦, 安裝新硬碟的 Virtio 驅動程式

[img[img/winvirtio03.png]]

[img[img/winvirtio04.png]]

[img[img/winvirtio05.png]]

''Windows 虛擬電腦的光碟機目錄內容''
''wxp:'' disk driver for Windows XP
''wnet:'' disk driver for Windows 2003
''wlh:'' disk driver for Windows 2008
''xp:'' network driver for Windows XP/2003
''vista:'' network driver for Windows Vista/7/2008

[img[img/winvirtio06.png]]

6. 關閉 Windows 虛擬電腦, 移除暫時新增一個 virtio 的硬碟, 然後將原先系統啟動硬碟的型態改為 Virtio, 如下圖 :

[img[img/winvirtio07.png]]

<<toBalaNotes "1">>

{{item1{Windows 虛擬電腦安裝網卡 Virtio 驅動程式}}}

1. 關閉 Windows 虛擬電腦, 將原先系統網卡的型態改為 Virtio, 如下圖 :

[img[img/winvirtio08.png]]

2. 啟動 Windows 虛擬電腦, 此時 Windows 系統會自動安裝網卡的 Virtio 驅動程式

''[註 1]'' virtio-win-0.x-xx.iso 這光碟檔, 必須指定給 Windows 虛擬電腦的光碟機

<<toBalaNotes "2">>
{{item1{NBD 儲存目錄建立}}}
在 Virt-Manager 工具中建立 NBD 儲存目錄, 操作步驟如下 :

[img[img/kvm/nbds01.png]]

[img[img/kvm/nbds02.png]]

[img[img/kvm/nbds04.png]]

[img[img/kvm/nbds03.png]]

{{item1{在 NBD 儲存目錄產生虛擬硬碟檔}}}

[img[img/kvm/nbds05.png]]

[img[img/kvm/nbds06.png]]

{{item1{虛擬主機使用 NBD 虛擬硬碟檔}}}

[img[img/kvm/nbds07.png]]

<<toBalaNotes "3">>

///%1
//%/

///%2
//%/

///%3
//%/

''參考文章''
1. 15 Practical Linux cURL Command Examples (一定要看) 
http://www.thegeekstuff.com/2012/04/curl-examples/

{{item1{建置網站目錄架構}}}
{{{
$ mkdir ~/www
$ mkdir ~/www/img
$ mkdir ~/www/js
}}}

{{item1{首頁設計 (~/www/index.html)}}}
{{{
$ nano ~/www/index.html
<html>
<head>
<script type="text/javascript" src="js/weatherimg.js"></script>
</head>
<body onload="show()">
<div id="weather">陰天</div>
</body>
</html>
}}}

{{item1{網頁程式設計 : ~/www/js/weatherimg.js}}}
{{{
function show() {
   // Swap the textual weather label inside #weather for its icon image.
   var iconMarkup = {
      "晴天": "<img src='./img/01.png' />",
      "下雨": "<img src='./img/25.png' />",
      "陰天": "<img src='./img/08.png' />"
   };

   var box = document.getElementById('weather');
   var label = box.innerHTML;

   // Only replace labels we recognize; anything else stays untouched.
   if (Object.prototype.hasOwnProperty.call(iconMarkup, label))
      box.innerHTML = iconMarkup[label];
}
}}}

<<toBalaNotes "1">>

{{item1{上載網站資料}}}
{{{
$ cd ~/www

$ scp -r  .  root@172.30.99.5:/var/www 
}}}

''[註]'' 需替 root 帳號, 設定密碼, 命令如下 :
{{{
$ sudo passwd root
}}}

<<toBalaNotes "2">>


///%1
//%/

///%2
//%/
1. Hadoop 叢集安裝
http://trac.nchc.org.tw/cloud/wiki/NCHCCloudCourse100802/Lab5
2. Distributed data processing with Hadoop, Part 2: Going further
http://www.ibm.com/developerworks/linux/library/l-hadoop-2/
3. Hadoop Default Ports Quick Reference
http://blog.cloudera.com/blog/2009/08/hadoop-default-ports-quick-reference/

{{item1{The distributed Hadoop architecture}}}

''Figure 1. Hadoop master and slave node decomposition''
 
[img[img/kvm/hdmulti.jpeg]]

As shown in Figure 1, the master node consists of the namenode, secondary namenode, and jobtracker daemons (the so-called master daemons). In addition, this is the node from which you manage the cluster for the purposes of this demonstration (using the Hadoop utility and browser). The slave nodes consist of the tasktracker and the datanode (the slave daemons). The distinction of this setup is that the master node contains those daemons that provide management and coordination of the Hadoop cluster, where the slave node contains the daemons that implement the storage functions for the Hadoop file system (HDFS) and MapReduce functionality (the data processing function).

For this demonstration, you create a master node and two slave nodes sitting on a single LAN. This setup is shown in Figure 2. Now, let's explore the installation of Hadoop for multinode distribution and its configuration.

''Figure 2. Hadoop cluster configuration''

[img[img/kvm/hdmulti01.jpeg]]

To simplify the deployment, you employ virtualization, which provides a few advantages. Although performance may not be advantageous in this setting, using virtualization, it's possible to create a Hadoop installation, and then clone it for the other nodes. For this reason, your Hadoop cluster should appear as follows, running the master and slave nodes as virtual machines (VMs) in the context of a hypervisor on a single host (see Figure 3).

''Figure 3. Hadoop cluster configuration in a virtual environment''

[img[img/kvm/hdmulti02.gif]]

''Relationship of the start scripts and daemons for each node ''

[img[img/kvm/hdsh.jpeg]]

<<toBalaNotes "1">>
{{item1{第一階段 : HDFS 網路設定}}}

''1. 登入 HDP120''
{{{
$ sudo virsh console HDP120
Connected to domain HDP120
Escape character is ^]
}}}

''2. 設定網路 : 在 HDP120 主機執行''

__編輯 /etc/hosts 名稱設定檔__

輸入所有主機 IP 及名稱
{{{
$ nano /etc/hosts
127.0.0.1  localhost
192.168.100.20 HDP120
192.168.100.21 HDP121
192.168.100.22 HDP122
}}}
@@color:red;''[註]'' 確認 HDP120 不可指定 127.0.0.1 這 Loop Back 位址, 如指定會造成 DataNode 無法連接到 NameNode@@

__使用 scp 命令, 將 /etc/hosts 複製到其他 Hadoop 主機__
{{{
# 登入密碼為 student
$ scp /etc/hosts root@HDP121:/etc/hosts
The authenticity of host 'hdp121 (192.168.100.21)' can't be established.
ECDSA key fingerprint is d9:3b:ed:58:44:29:33:b9:7e:d7:98:89:3a:01:7c:49.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'hdp121,192.168.100.21' (ECDSA) to the list of known hosts.
root@HDP121's password: 
hosts                                         100%  263     0.3KB/s   00:00    

# 登入密碼為 student
$ scp /etc/hosts root@HDP122:/etc/hosts
The authenticity of host 'hdp122 (192.168.100.22)' can't be established.
ECDSA key fingerprint is d9:3b:ed:58:44:29:33:b9:7e:d7:98:89:3a:01:7c:49.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'hdp122,192.168.100.22' (ECDSA) to the list of known hosts.
root@HDP122's password: 
hosts                                         100%  263     0.3KB/s   00:00 
}}}

''3. 設定 SSH 自動登入''

以下操作在 NameNode 主機 (HDP120) 執行, 因專職 DataNode 的 Hadoop 主機, 是由 NameNode 透過 ssh 自動登入, 將之啟動,  NameNode 也要設定 ssh 自動登入, 來啟動本身 namenode, datanode 及 secondarynamenode 這三個服務.

__將 NameNode 的公用金鑰 (id_dsa.pub), 複製到 NameNode 及 DataNode 主機, 並將之更名為 authorized_keys__
{{{
# scp ~/.ssh/id_dsa.pub root@HDP121:/root/.ssh/authorized_keys
root@hdp121's password: 
id_dsa.pub                                    100%  601     0.6KB/s   00:00 

# scp ~/.ssh/id_dsa.pub root@HDP122:/root/.ssh/authorized_keys
root@hdp122's password: 
id_dsa.pub                                    100%  601     0.6KB/s   00:00  
}}}

''[註]'' HDP121 及 HDP122 主機的 /root 目錄中, 必須先產生 .ssh 目錄 (已完成)

''4. 測試自動登入, 並取得自動登入主機憑證 (@@color:red;一定要執行@@)''
必須登入 NameNode  及二部 DataNode, 執行以下命令, 需使用電腦名稱(HostName) 來登入, 不可使用 IP 位址來登入,  照以下命令操作 .ssh/known_hosts 這檔案中的憑證, 在執行 start-dfs.sh 時便可正確被使用.
''自動登入 HDP120'' 
{{{
# ssh HDP120
Linux HDP120 2.6.32-33-generic-pae #72-Ubuntu SMP Fri Jul 29 22:06:29 UTC 2011 i686 GNU/Linux
Ubuntu 10.04.4 LTS

Welcome to Ubuntu!
 * Documentation:  https://help.ubuntu.com/
Last login: Thu Aug  2 19:00:44 2012
root@HDP120:~# exit
}}}

''自動登入 HDP121'' 
{{{
$ ssh HDP121              # 不可使用 IP 位址
$ filetool.sh -b               # 因是 TinyCore 系統, 需執行此命令儲存憑證
$ exit
}}}

''自動登入 HDP122'' 
{{{
$ ssh HDP122              # 不可使用 IP 位址
$ filetool.sh -b               # 因是 TinyCore 系統, 需執行此命令儲存憑證
$ exit
}}}

''[補充] StrictHostKeyChecking is a setting at the ssh client side.''

If set to ''No'' new host keys will be automatically added to the ''known_hosts'' file, and changed host keys will be silently replaced.Setting it to ''yes'' is meant to give some protection against trojan horse attacks, but every new or changed host key must be added or replaced manually.

I'd recommend setting it to ''ask''. With this setting at least new host keys will be added automatically after user confirmation, and changed host keys will never be replaced, so security is maintained yet life becomes a bit easier in an environment where many new hosts need to be accessed.
{{{
$ cat /etc/ssh/ssh_config | grep StrictHostKeyChecking
#   StrictHostKeyChecking ask
}}}

<<toBalaNotes "2">>
{{item1{第二階段 : NameNode 設定 (HDP120)}}}

''1. NameNode 電腦設定檔''

__編輯 conf/masters__
{{{
$ cd /mnt/hda1/hadoop-1.0.3/
$ nano conf/masters 
HDP120
}}}

__編輯 conf/slaves__
{{{
$ nano conf/slaves
HDP120
HDP121
HDP122
}}}

__編輯 conf/core-site.xml__
{{{
$ cd /mnt/hda1/hadoop-1.0.3
$ mkdir data                        # 建立 HDFS 資料庫儲存目錄

$ nano conf/core-site.xml 
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<!-- Put site-specific property overrides in this file. -->

<configuration>
     <property>
         <name>fs.default.name</name>
         <value>hdfs://HDP120:9000</value>
     </property>
     <property>
         <name>hadoop.tmp.dir</name>
         <value>/mnt/hda1/hadoop-1.0.3/data</value>
     </property>
</configuration>
}}}

''[重點]''
It is ''critically important'' in a real cluster that dfs.name.dir and dfs.data.dir be ''moved out'' from ''hadoop.tmp.dir''. A real cluster should never consider these directories temporary, as they are where all persistent HDFS data resides. Production clusters should have two paths listed for ''dfs.name.dir'' which are on two different physical file systems, to ensure that cluster metadata is preserved in the event of hardware failure.

|''dfs.name.dir'' |Path on the local filesystem where the NameNode stores the namespace and transactions logs persistently.|
|''dfs.data.dir'' |Comma separated list of paths on the local filesystem of a DataNode where it should store its blocks.|

__編輯 conf/hdfs-site.xml__
{{{
$ nano conf/hdfs-site.xml 
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<!-- Put site-specific property overrides in this file. -->

<configuration>
     <property>
         <name>dfs.replication</name>
         <value>2</value>
     </property>
</configuration>
}}}

''[註]'' dfs.safemode.threshold.pct 的值為 0, NameNode 一啟動不會進入 safe mode (read only)
{{{
     <property>
         <name>dfs.safemode.threshold.pct</name>
         <value>1</value>
     </property>
}}}

''2. 重建 data 目錄 (一定要執行此動作)''
{{{
# rm -r data
# mkdir data
}}}

''3. 格式化 NameNode 資料庫'' 
{{{
#
# 因之前已產生資料庫, 所以會詢問你是否重新格式化 ? 請回答 "Y" (一定要大寫)
#
# hadoop namenode -format
12/08/02 19:18:20 INFO namenode.NameNode: STARTUP_MSG: 
/************************************************************
STARTUP_MSG: Starting NameNode
STARTUP_MSG:   host = HDP120/192.168.100.20
STARTUP_MSG:   args = [-format]
STARTUP_MSG:   version = 1.0.3
STARTUP_MSG:   build = https://svn.apache.org/repos/asf/hadoop/common/branches/branch-1.0 -r 1335192; compiled by 'hortonfo' on Tue May  8 20:31:25 UTC 2012
************************************************************/
Re-format filesystem in /mnt/hda1/hadoop-1.0.3/data/dfs/name ? (Y or N) Y
12/08/02 19:18:24 INFO util.GSet: VM type       = 32-bit
12/08/02 19:18:24 INFO util.GSet: 2% max memory = 7.425 MB
12/08/02 19:18:24 INFO util.GSet: capacity      = 2^21 = 2097152 entries
12/08/02 19:18:24 INFO util.GSet: recommended=2097152, actual=2097152
12/08/02 19:18:25 INFO namenode.FSNamesystem: fsOwner=root
12/08/02 19:18:25 INFO namenode.FSNamesystem: supergroup=supergroup
12/08/02 19:18:25 INFO namenode.FSNamesystem: isPermissionEnabled=true
12/08/02 19:18:25 INFO namenode.FSNamesystem: dfs.block.invalidate.limit=100
12/08/02 19:18:25 INFO namenode.FSNamesystem: isAccessTokenEnabled=false accessKeyUpdateInterval=0 min(s), accessTokenLifetime=0 min(s)
12/08/02 19:18:25 INFO namenode.NameNode: Caching file names occuring more than 10 times 
12/08/02 19:18:26 INFO common.Storage: Image file of size 110 saved in 0 seconds.
12/08/02 19:18:26 INFO common.Storage: Storage directory /mnt/hda1/hadoop-1.0.3/data/dfs/name has been successfully formatted.
12/08/02 19:18:26 INFO namenode.NameNode: SHUTDOWN_MSG: 
/************************************************************
SHUTDOWN_MSG: Shutting down NameNode at HDP120/192.168.100.20
************************************************************/
}}}

在上面訊息中, Common Storage 目錄是 '' /mnt/hda1/hadoop-1.0.3/data/dfs/name''.

<<toBalaNotes "3">>
{{item1{第三階段 : DataNode 電腦設定 (HDP121,HDP122)}}}

所有 DataNode 電腦均要執行以下設定

''1. 設定 DataNode 電腦''
{{{
# 登入 HDP121, 輸入帳號為 root, 密碼為 student
$ sudo virsh console HDP121
Connected to domain HDP121
Escape character is ^]

Micro Core Linux 3.8.2
HDP121 login: root
Password: 

# 切換至 /mnt/hda1/hadoop-1.0.3 目錄
$ cd /mnt/hda1/hadoop-1.0.3

$ mkdir data                    # 建立 HDFS 資料庫儲存目錄
}}}

''修改 core-site.xml 設定檔''
{{{
$ nano conf/core-site.xml 
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<!-- Put site-specific property overrides in this file. -->

<configuration>
     <property>
         <name>fs.default.name</name>
         <value>hdfs://HDP120:9000</value>
     </property>
     <property>
         <name>hadoop.tmp.dir</name>
         <value>/mnt/hda1/hadoop-1.0.3/data</value>
     </property>
</configuration>
}}}

''修改 hdfs-site.xml  設定檔''
{{{
$ nano conf/hdfs-site.xml 
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<!-- Put site-specific property overrides in this file. -->

<configuration>
     <property>
         <name>dfs.replication</name>
         <value>2</value>
     </property>
</configuration>
}}}

''2. 修改 conf/hadoop-env.sh 設定檔''
{{{
$ nano conf/hadoop-env.sh 
# Set Hadoop-specific environment variables here.

# The only required environment variable is JAVA_HOME.  All others are
# optional.  When running a distributed configuration it is best to
# set JAVA_HOME in this file, so that it is correctly defined on
# remote nodes.

# The java implementation to use.  Required.
export JAVA_HOME=/mnt/hda1/jdk1.6.0_33    # 修改

# Extra Java CLASSPATH elements.  Optional.
# export HADOOP_CLASSPATH=

# The maximum amount of heap to use, in MB. Default is 1000.
export HADOOP_HEAPSIZE=384                # 修改

# Extra Java runtime options.  Empty by default.
# export HADOOP_OPTS=-server
                       :
}}}

''[重點提示]''

@@color:blue;DataNode 記憶體計算@@
{{{
Datanode                           1*  384 (HADOOP_HEAPSIZE)
Tasktracker                        1 * 384 (HADOOP_HEAPSIZE)
Tasktracker child map task       2 * 200  (mapred.tasktracker.map.tasks.maximum=2)
Tasktracker child reduce task    2 * 200  (mapred.tasktracker.reduce.tasks.maximum=2)
----------------------------------------------------------------------------------------------------------------------------------------------------
Total                                        1568
}}}

@@color:blue;Namenode 記憶體計算@@
{{{
1000 MB per million blocks of storage
}}}

{{item1{啟動 HDFS (HDP120)}}}

''1. 在 NameNode 電腦, 啟動 HDFS''
{{{
# 登入 HDP120
$ sudo virsh console HDP120
Connected to domain HDP120
Escape character is ^]

# 啟動 HDFS
$ start-dfs.sh 

# 檢示執行狀態
$ jps
3749 SecondaryNameNode
3575 NameNode
3664 DataNode
3786 Jps
}}}

''[重點提示]''
start-dfs.sh 執行重點
* 在執行 start-dfs.sh 程式的電腦系統中, 啟動 NameNode, 所以 NameNode 不是由 masters 這個檔案設定
* 根據 masters 設定檔, 啟動 secondary namenode
* 根據 slaves 設定檔, 啟動所有 datanode

''2. 檢視 NameNode.log''
{{{
# tail logs/hadoop-root-namenode-HDP120.log 
2011-10-29 17:00:53,137 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem: Number of transactions: 2 Total time for transactions(ms): 1Number of transactions batched in Syncs: 0 Number of syncs: 1 SyncTimes(ms): 1 
2011-10-29 17:00:54,124 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem: Roll FSImage from 192.168.100.20
2011-10-29 17:00:54,124 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem: Number of transactions: 0 Total time for transactions(ms): 0Number of transactions batched in Syncs: 0 Number of syncs: 1 SyncTimes(ms): 1 
2011-10-29 17:20:03,864 INFO org.apache.hadoop.hdfs.StateChange: BLOCK* NameSystem.registerDatanode: node registration from 192.168.100.21:50010 storage DS-1615897813-192.168.100.21-50010-1319908803441
2011-10-29 17:20:03,871 INFO org.apache.hadoop.net.NetworkTopology: Adding a new node: /default-rack/192.168.100.21:50010
2011-10-29 17:20:03,963 INFO org.apache.hadoop.hdfs.StateChange: *BLOCK* NameSystem.processReport: from 192.168.100.21:50010, blocks: 0, processing time: 2 msecs
2011-10-29 17:20:04,715 INFO org.apache.hadoop.hdfs.StateChange: BLOCK* NameSystem.registerDatanode: node registration from 192.168.100.22:50010 storage DS-740608667-192.168.100.22-50010-1319908804217
2011-10-29 17:20:04,716 INFO org.apache.hadoop.net.NetworkTopology: Adding a new node: /default-rack/192.168.100.22:50010
2011-10-29 17:20:04,799 INFO org.apache.hadoop.hdfs.StateChange: *BLOCK* NameSystem.processReport: from 192.168.100.22:50010, blocks: 0, processing time: 0 msecs
2011-10-29 17:21:42,957 INFO org.apache.hadoop.hdfs.StateChange: *BLOCK* NameSystem.processReport: from 192.168.100.21:50010, blocks: 0, processing time: 0 msecs
}}}

''3. 檢視可用 DataNode''
{{{
# hadoop dfsadmin -report
Configured Capacity: 12436074496 (11.58 GB)
Present Capacity: 10070712320 (9.38 GB)
DFS Remaining: 9360814080 (8.72 GB)
DFS Used: 709898240 (677.01 MB)
DFS Used%: 7.05%
Under replicated blocks: 0
Blocks with corrupt replicas: 0
Missing blocks: 0

-------------------------------------------------
Datanodes available: 3 (3 total, 0 dead)

Name: 192.168.100.21:50010
Decommission Status : Normal
Configured Capacity: 4226125824 (3.94 GB)
DFS Used: 354930688 (338.49 MB)
Non DFS Used: 397107200 (378.71 MB)
DFS Remaining: 3474087936(3.24 GB)
DFS Used%: 8.4%
DFS Remaining%: 82.21%
Last contact: Sun Jul 01 01:27:14 CST 2012

Name: 192.168.100.22:50010
Decommission Status : Normal
Configured Capacity: 4226125824 (3.94 GB)
DFS Used: 28672 (28 KB)
Non DFS Used: 397103104 (378.71 MB)
DFS Remaining: 3828994048(3.57 GB)
DFS Used%: 0%
DFS Remaining%: 90.6%
Last contact: Sun Jul 01 01:27:14 CST 2012

Name: 192.168.100.20:50010
Decommission Status : Normal
Configured Capacity: 3983822848 (3.71 GB)
DFS Used: 354938880 (338.5 MB)
Non DFS Used: 1571151872 (1.46 GB)
DFS Remaining: 2057732096(1.92 GB)
DFS Used%: 8.91%
DFS Remaining%: 51.65%
Last contact: Sun Jul 01 01:27:12 CST 2012
}}}

{{item1{檢視 DataNode 電腦的啟動服務 (HDP121,HDP122)}}}

''1. 登入 DataNode''
{{{
$ ssh HDP121
}}}

''2. 檢視啟動服務''
{{{
$ jps
1186 Jps
1132 DataNode

$ exit
}}}

''3. 檢視 DataNode.log''
{{{
# tail logs/hadoop-root-datanode-HDP121.log 
2011-10-29 17:20:03,517 INFO org.apache.hadoop.hdfs.server.datanode.DataNode: DatanodeRegistration(192.168.100.21:50010, storageID=DS-1615897813-192.168.100.21-50010-1319908803441, infoPort=50075, ipcPort=50020)In DataNode.run, data = FSDataset{dirpath='/tmp/hadoop-root/dfs/data/current'}
2011-10-29 17:20:03,520 INFO org.apache.hadoop.ipc.Server: IPC Server Responder: starting
2011-10-29 17:20:03,527 INFO org.apache.hadoop.ipc.Server: IPC Server listener on 50020: starting
2011-10-29 17:20:03,531 INFO org.apache.hadoop.ipc.Server: IPC Server handler 1 on 50020: starting
2011-10-29 17:20:03,532 INFO org.apache.hadoop.hdfs.server.datanode.DataNode: using BLOCKREPORT_INTERVAL of 3600000msec Initial delay: 0msec
2011-10-29 17:20:03,534 INFO org.apache.hadoop.ipc.Server: IPC Server handler 0 on 50020: starting
2011-10-29 17:20:03,537 INFO org.apache.hadoop.ipc.Server: IPC Server handler 2 on 50020: starting
2011-10-29 17:20:03,579 INFO org.apache.hadoop.hdfs.server.datanode.DataNode: BlockReport of 0 blocks took 1 msec to generate and 10 msecs for RPC and NN processing
2011-10-29 17:20:03,603 INFO org.apache.hadoop.hdfs.server.datanode.DataNode: Starting Periodic block scanner.
2011-10-29 17:21:42,570 INFO org.apache.hadoop.hdfs.server.datanode.DataNode: BlockReport of 0 blocks took 0 msec to generate and 4 msecs for RPC and NN processing
}}}
<<toBalaNotes "4">>
{{item1{上載檔案}}}
{{{
# hadoop dfs -put AS995.vmdk /user
}}}

''檢視上載檔案資訊''
{{{
# hadoop fsck /user/AS995.vmdk -files -blocks -locations
FSCK started by root from /192.168.100.20 for path /user/AS995.vmdk at Sun Jul 01 01:10:06 CST 2012
/user/AS995.vmdk 352124928 bytes, 6 block(s):  OK
0. blk_-581360644797674258_1007 len=67108864 repl=2 [192.168.100.21:50010, 192.168.100.20:50010]
1. blk_7312425541916638216_1007 len=67108864 repl=2 [192.168.100.21:50010, 192.168.100.20:50010]
2. blk_-2056972131637659062_1007 len=67108864 repl=2 [192.168.100.21:50010, 192.168.100.20:50010]
3. blk_-2289638239569806863_1007 len=67108864 repl=2 [192.168.100.21:50010, 192.168.100.20:50010]
4. blk_-4890352689846441895_1007 len=67108864 repl=2 [192.168.100.21:50010, 192.168.100.20:50010]
5. blk_-4165205837078382663_1007 len=16580608 repl=2 [192.168.100.21:50010, 192.168.100.20:50010]

Status: HEALTHY
 Total size:	352124928 B
 Total dirs:	0
 Total files:	1
 Total blocks (validated):	6 (avg. block size 58687488 B)
 Minimally replicated blocks:	6 (100.0 %)
 Over-replicated blocks:	0 (0.0 %)
 Under-replicated blocks:	0 (0.0 %)
 Mis-replicated blocks:		0 (0.0 %)
 Default replication factor:	2
 Average block replication:	2.0
 Corrupt blocks:		0
 Missing replicas:		0 (0.0 %)
 Number of data-nodes:		2
 Number of racks:		1
FSCK ended at Sun Jul 01 01:10:06 CST 2012 in 20 milliseconds

The filesystem under path '/user/AS995.vmdk' is HEALTHY
}}}

{{item1{MapReduce 功能測試}}}

''1. 建立 /count 資料夾''
{{{
# hadoop dfs -mkdir /count
}}}

''2. 上載 文字檔''
{{{
# hadoop dfs -put /mnt/hda1/hadoop-1.0.3/*.txt /count
}}}

''3. 執行單字計算的 MapReduce 程式''
{{{
# hadoop jar /mnt/hda1/hadoop-1.0.3/hadoop-examples-1.0.3.jar wordcount /count /out
}}}

''4. 檢視執行結果''
{{{
# hadoop dfs -cat /out/part-r-00000
}}}
<<toBalaNotes "5">>
{{item1{管理網站}}}

{{op1{HDFS 監控網站 (Port 50070)}}}

[img[img/hadoop/hdfsweb.png]]

{{op1{MapReduce 監控網站 (Port 50030)}}}

[img[img/hadoop/mapredweb.png]]

///%1
//%/

///%2
//%/

///%3
//%/

///%4
//%/

///%5
//%/

{{item1{VMM 新增虛擬硬碟檔}}}
只要使用 Virt-Manager 圖形管理工具, 產生的虛擬硬碟檔, 其權限一律設為 @@color:red;root:root@@ 

{{item1{使用已存在虛擬硬碟檔(自行產生)}}}

1. 原先 TC660.vmdk 權限為 ''student:student'', 如使用 VMM 將 TC66 虛擬電腦啟動, 此時 TC660.vmdk 的權限會改成 ''libvirt-qemu:kvm'', 如下圖 :

[img[img/kvm/libvirtp02.png]]

2. 關機後竟然將權限改成 ''root:root'', 如下圖 : 

[img[img/kvm/libvirtp03.png]]

''[註]'' 使用 virsh 命令與 VMM 同樣權限運作方式

<<toBalaNotes "1">>
{{item1{指定虛擬硬碟檔權限}}}

1. 修改 /etc/libvirt/qemu.conf 設定檔, 將虛擬硬碟檔權限設為 student:student, 並將 dynamic_ownership 設為 0

[img[img/kvm/libvirtp04.png]]

2. 重新啟動 libvirt-bin 
{{{
$ sudo service libvirt-bin restart
}}}

3. 啟動 GW100 虛擬電腦後, 權限還是 student:student, 即使關機權限還是 student:student

[img[img/kvm/libvirtp05.png]]
<<toBalaNotes "2">>
///%1
//%/

///%2
//%/

{{item1{啟動檔案式管理系統}}}
{{{
$ cd oc9/oc9fapi/

$ mkdir test

$ sudo apt-get install mkpasswd

$ sudo ./oc9mondir.sh test &

}}}

{{item1{執行登入}}}

編輯 test/login 檔案, 輸入正確的帳號及密碼, 然後存檔, 便可執行帳號驗證. 如驗證成功, test/login 檔案會被刪除, 這時 test 目錄會自動產生 logout 檔案

[img[img/fapi/oc9fapi01.png]]

[img[img/fapi/oc9fapi02.png]]

{{item1{執行登出}}}

編輯 test/logout 檔案, 輸入 "ok" 字串, test/logout 檔案會被刪除, 這時 test 目錄會再一次產生 login 檔案

[img[img/fapi/oc9fapi03.png]]

{{item1{停止檔案式管理系統}}}
{{{
$ fg
sudo ./oc9mondir.sh test

^C** Trapped CTRL-C
inotifywait: no process found
已砍掉
}}}

<<toBalaNotes "1">>






///%1
//%/
每一部主機都有一個首頁,但是如果每個使用者都想要有可以自己完全控管的首頁時, 那該如何設計?呵呵!Apache 早就幫我們想到了!

''1. 啟動 mod_userdir 動態模組''
{{{
$ sudo a2enmod userdir
Enabling module userdir.
Run '/etc/init.d/apache2 restart' to activate new configuration!
}}}

''2. 修改 /etc/apache2/httpd.conf 設定檔內容''
{{{
$ sudo nano /etc/apache2/httpd.conf
}}}

修改內容如下 : 
{{{
# Settings for user home directories
#
# Required module: mod_userdir   (a2enmod 命令啟動)
#
# UserDir: The name of the directory that is appended onto a user's home
# directory if a ~user request is received.  Note that you must also set
# the default access control for these directories, as in the example below.
#
UserDir www    

<Directory /home/*/www>
    AllowOverride FileInfo AuthConfig Limit Indexes
    Options MultiViews Indexes SymLinksIfOwnerMatch IncludesNoExec

    # If GET is used it will also restrict HEAD requests. The TRACE method cannot be limited.
    <Limit GET POST OPTIONS>
        Order allow,deny
        Allow from all
    </Limit>

    <LimitExcept GET POST OPTIONS>
        Order deny,allow
        Deny from all
    </LimitExcept>
</Directory>
}}}
上面設定將預設的個人首頁, 放置在家目錄下的 www/ 目錄下!假如 tobala 的家目錄在 /home/tobala,那麼他的個人首頁預設在 /home/tobala/www/ 囉!你當然可以修改這個目錄名稱, 只要將上表的 UserDir 後面內容改掉即可。例如改為 public_html 時,使用者的個人首頁則放在家目錄下的 public_html 目錄中。 設定完畢後得要重新啟動你的 apache 喔!
{{{
$ sudo /etc/init.d/apache2 restart
 * Restarting web server apache2                                                 
... waiting                                                             [ OK ]
}}}

''[問題]'' 如何讓未來所有『新增』的使用者預設家目錄下都有個 www 的目錄?

''答:'' 因為新增使用者時所參考的家目錄在 /etc/skel 目錄內,所以你可以直接 mkdir /etc/skel/www 即可。 

好了,假設你的主機有個 student 的用戶,那麼這傢伙怎麼建立他的個人首頁? 我們這樣測試一下好了!(底下的工作請以你的一般身份使用者處理):
{{{
$ su student
$ cd ~
$ mkdir www
$ chmod 755 www
$ echo "Test your home" >  www/index.html
}}}

你的 www 目錄權限成為 drwxr-xr-x 才行!這個很重要啊! 那麼未來只要你在瀏覽器的網址列這樣輸入:
{{{
http://你的主機名稱/~student/
}}}

''[問題]'' 如將 index.html 檔案刪除, 此時瀏覽結果如何 ?

不過,多這個毛毛蟲就很討厭~ 我可不可以將使用者的個人網站設定成為:
{{{
http://你的主機名稱/student/
}}}

是可以啦!最簡單的方法是這樣的:
{{{
$ cd /var/www
$ sudo ln -s /home/student/www student
}}}
<<toBalaNotes "1">>
{{item1{限制 HTTP 命令}}}

1. 使用 Firefox 開啟 ''Linux2008\ajax\AjaxCall.htm'' 網頁

2. 在 Method 欄位, 輸入 GET, 然後在 URL 欄位, 輸入 http://apache IP/~student

3. 執行結果, 如下圖 :

[img[img/apache2/ap2test01.png]]

4. 在 Method 欄位, 輸入 HEAD, 執行結果, 如下圖 :

[img[img/apache2/ap2test02.png]]

5. 修改 /etc/apache2/httpd.conf  設定檔, 如下 :
在 Limit 設定, 請移除 GET 命令
{{{
               : 
    <Limit POST OPTIONS>
        Order allow,deny
        Allow from all
    </Limit>

    <LimitExcept POST OPTIONS>
        Order deny,allow
        Deny from all
    </LimitExcept>
}}}

''[註]'' If ''GET'' is used it will also restrict ''HEAD'' requests. The ''TRACE'' method cannot be limited.

6. 重新啟動 Apache 2, 再一次執行 HEAD 命令, 會出現錯誤訊息, 結果如下 :

[img[img/apache2/ap2test03.png]]
<<toBalaNotes "2">>
{{item1{限制連接主機}}}

1. 修改 /etc/apache2/httpd.conf  設定檔, 如下 :
{{{
<Directory /home/*/www>
                  :
    <Limit GET POST OPTIONS>
        Order allow,deny
        Allow from all
	Deny from 192.168.56.1   
    </Limit>
                  :
</Directory>
}}}

2. 在 Method 欄位, 輸入 HEAD, 執行結果, 會出現錯誤訊息, 如下圖 :

[img[img/apache2/ap2test04.png]]

<<toBalaNotes "3">>
{{item1{AllowOverride}}}
是否允許額外設定檔 .htaccess 的權限複寫?我們可以在 httpd.conf 內設定好所有的權限,不過如此一來若使用者自己的個人網頁想要修改權限時將會對管理員造成困擾。因此 Apache 預設可以讓使用者以目錄底下的 .htaccess 檔案內複寫 <Directory> 內的權限設定。 這個項目則是在規定 .htaccess 可以複寫的權限類型有哪些。常見的有:
{{{
* ALL:全部的權限均可被複寫;
* AuthConfig:僅有網頁認證 (帳號密碼) 可複寫;
* Indexes:僅允許 Indexes 方面的複寫;
* Limit:允許使用者利用 Allow, Deny 與 Order 管理可瀏覽的權限;
* None:不可複寫,亦即 .htaccess 檔案失效去!
}}}

{{item1{Order}}}
''The Order directive, along with the Allow and Deny directives, controls a @@color:red;three-pass@@ access control system''. The first pass processes either all Allow or all Deny directives, as specified by the Order directive. The second pass parses the rest of the directives (Deny or Allow). The third pass applies to all requests which do not match either of the first two.

''[註]'' that all Allow and Deny directives are processed, unlike a typical firewall, where only the first match is used. The last match is effective (also unlike a typical firewall). Allow 及 Deny 命令均要檢查, 最後一個符合條件生效

Additionally, the order in which lines appear in the configuration files is not significant -- all Allow lines are processed as one group, all Deny lines are considered as another, and the default state is considered by itself.

Ordering is one of: (以下說明一定要看)

''Allow,Deny''
''First'', all Allow directives are evaluated; at least one must match, or the request is rejected. ''Next'', all Deny directives are evaluated. If any matches, the request is rejected. ''Last'', any requests which do not match an Allow or a Deny directive are denied by default.

''Deny,Allow''
''First'', all Deny directives are evaluated; if any match, the request is denied unless it also matches an Allow directive. Any requests which do not match any Allow or Deny directives are permitted.

''Mutual-failure''
This order has the same effect as Order Allow,Deny and is deprecated in its favor. 

In the following example, all hosts in the apache.org domain are allowed access; all other hosts are denied access.
{{{
Order Deny,Allow
Deny from all
Allow from apache.org
}}}
In the next example, all hosts in the apache.org domain are allowed access, except for the hosts which are in the foo.apache.org subdomain, who are denied access. All hosts not in the apache.org domain are denied access because the default state is to Deny access to the server.
{{{
Order Allow,Deny
Allow from apache.org
Deny from foo.apache.org
}}}
On the other hand, if the Order in the last example is changed to Deny,Allow, all hosts will be allowed access. This happens because, regardless of the actual ordering of the directives in the configuration file, the Allow from apache.org will be evaluated last and will override the Deny from foo.apache.org. All hosts not in the apache.org domain will also be allowed access because the default state is Allow.

The presence of an Order directive can affect access to a part of the server even in the absence of accompanying Allow and Deny directives because of its effect on the default access state. For example,
{{{
<Directory /www>
   Order Allow,Deny
</Directory> 
}}}
''will Deny all access to the /www directory because the default access state is set to Deny''.

The Order directive controls the order of access directive processing only within each phase of the server's configuration processing. This implies, for example, that an Allow or Deny directive occurring in a <Location> section will always be evaluated after an Allow or Deny directive occurring in a <Directory> section or .htaccess file, regardless of the setting of the Order directive. For details on the merging of configuration sections, see the documentation on How Directory, Location and Files sections work.
<<toBalaNotes "4">>



///%1
//%/

///%2
//%/

///%3
//%/

///%4
//%/
''參考文章''
1. What the HTTP is CouchApp?
http://couchapp.org/page/what-is-couchapp
2. CouchApp.org
http://couchapp.org/page/index
3. CouchOne
http://origin.couchone.com
4. CouchOne Recipes
http://origin.couchone.com/page/recipes

{{item1{CouchApp 安裝}}}
CouchApp is a very powerful app development metaphor based on CouchDB. Apps are encapsulated in a JSON object (CouchDB document) and saved in CouchDB just like data.

To install couchapp using easy_install you must make sure you have a recent version of distribute installed:
{{{
$ curl -O http://python-distribute.org/distribute_setup.py
$ sudo python distribute_setup.py
$ easy_install pip
}}}
To install or upgrade to the latest released version of couchapp:
{{{
$ pip install couchapp
}}}

{{item1{HelloWorld 應用系統}}}

''1. 建立 helloworld 資料庫''
{{{
# curl -X PUT http://192.168.122.92:5984/helloworld
{"error":"unauthorized","reason":"You are not a server admin."}

# curl -X PUT http://admin:admin@192.168.122.92:5984/helloworld
{"ok":true}
}}}

''2. 建立應用系統目錄''
{{{
# mkdir myapps
# mkdir myapps/hw0
# mkdir myapps/hw0/_attachments
}}}

''3. 編輯網頁''
{{{
# nano myapps/hw0/_attachments/index.html

<!doctype html>
<html>
<head></head>
<body><h2>Hello World!</h2></body>
</html>
}}}

''4. 上載應用系統''
{{{
# couchapp push hw0 http://admin:admin@192.168.122.92:5984/helloworld
2011-02-23 07:32:34 [INFO] Visit your CouchApp here:
http://192.168.122.92:5984/helloworld/_design/hw0/index.html
}}}

''5. 執行 應用系統''
{{{
# curl http://192.168.122.92:5984/helloworld/_design/hw0/index.html
<!doctype html>
<html>
<head></head>
<body><h2>Hello World!</h2></body>
</html>
}}}

{{item1{加入多媒體檔案}}}

''1. 建立圖檔目錄''
{{{
# mkdir myapps/hw0/_attachments/img
}}}

''2. 將 box.jpg 圖檔放入 myapps/hw0/_attachments/img 目錄''

''3. 修改首頁''
{{{
$ nano myapps/hw0/_attachments/index.html
<!doctype html>
<html>
<head></head>
<body><h2>Hello World!</h2></body>
<img src='img/box.gif'/>
</html>
}}}

''4. 上載應用系統''
{{{
# couchapp push hw0 http://admin:admin@192.168.122.92:5984/helloworld
2011-02-23 07:32:34 [INFO] Visit your CouchApp here:
http://192.168.122.92:5984/helloworld/_design/hw0/index.html
}}}
<<toBalaNotes "1">>









///%1
//%/
''參考文章''
1. Universal TUN/TAP device driver.
http://www.kernel.org/doc/Documentation/networking/tuntap.txt
2. Tun/Tap interface tutorial
http://backreference.org/2010/03/26/tuntap-interface-tutorial/
3. Ubuntu Server Guide
https://help.ubuntu.com/12.04/serverguide/index.html
4. 20 Linux System Monitoring Tools Every SysAdmin Should Know
http://www.cyberciti.biz/tips/top-linux-monitoring-tools.html

''From Wikipedia''
''TAP (as in network tap)'' simulates a link layer device and it operates with layer 2 packets such as Ethernet frames. ''TUN (as in network TUNnel)'' simulates a network layer device and it operates with layer 3 packets such as IP packets. TAP is used to create a network bridge, while TUN is used with routing.

Packets sent by an operating system via a TUN/TAP device are delivered to a user-space program that attaches itself to the device. A user-space program may also pass packets into a TUN/TAP device. In this case TUN/TAP device delivers (or "injects") these packets to the operating-system network stack thus emulating their reception from an external source.

@@font-size:14pt;
The Tun/Tap user-space tunnel driver which was included in the Linux kernel as of version ''2.4'', also originally developed by ''Maxim Krasnyansky''. ''Bishop Clark'' is the current maintainer.
@@
{{op1{tun (network TUNnel) 虛擬的是 點對點 設備}}}
{{{
-simulates a network layer device
-layer 3 packets, such as IP packet
-used with routing
}}}
{{op1{tap (network TAP) 虛擬的是 乙太網路 設備}}}
{{{
-simulates an Ethernet device
-layer 2 packets, such as Ethernet frames
-used to create a network bridge
}}}

{{item1{安裝 TUN/TAP 管理套件}}}
TUN/TAP 原本就是 Linux 系統所提供的虛擬網路介面裝置,只是預設並不是每套 Linux 的發行版本都有內建此功能,以 Ubuntu 12.04 來說,預設就沒有安裝 TUN/TAP 的相關套件,所以要使用 TUN/TAP 來建立虛擬網路卡之前,則需要先來安裝操控 TAP 的相關套件。

操控 TUN/TAP 的套件名稱為 "uml-utilities",所以可以利用以下指令來安裝此套件:
{{{
$ tunctl
程式 'tunctl' 目前尚未安裝。  您可以由輸入以下內容安裝:
sudo apt-get install uml-utilities

$ sudo apt-get install uml-utilities
正在讀取套件清單... 完成
正在重建相依關係
正在讀取狀態資料... 完成
建議套件:
  user-mode-linux
下列【新】套件將會被安裝:
  uml-utilities
升級 0 個,新安裝 1 個,移除 0 個,有 0 個未被升級。
需要下載 61.9 kB 的套件檔。
此操作完成之後,會多佔用 267 kB 的磁碟空間。
下載:1 http://ftp.twaren.net/Linux/Ubuntu/ubuntu/ precise/universe uml-utilities amd64 20070815-1.3ubuntu1 [61.9 kB]
取得 61.9 kB 用了 1秒 (34.5 kB/s)
Selecting previously unselected package uml-utilities.
(正在讀取資料庫 ... 199076 files and directories currently installed.)
正在解開 uml-utilities (從 .../uml-utilities_20070815-1.3ubuntu1_amd64.deb)...
正在進行 ureadahead 的觸發程式 ...
ureadahead will be reprofiled on next reboot
正在進行 man-db 的觸發程式 ...
正在設定 uml-utilities (20070815-1.3ubuntu1) ...
 * Starting User-mode networking switch uml_switch                       [ OK ]
}}}

{{item1{設定 TAP 網路裝置}}}

''1. 產生 TAP 網路裝置''
{{{
$ sudo tunctl -u student
Set 'tap0' persistent and owned by uid 1000
}}}

''2. 手動設定 TAP 網卡的 MAC 位址''
{{{
$ ifconfig tap0 hw ether 4c:22:d0:b8:78:ae
SIOCSIFHWADDR: 此項操作並不被允許

$ sudo ifconfig tap0 hw ether 4c:22:d0:b8:78:ae
$ ifconfig tap0
tap0      Link encap:Ethernet  HWaddr 4c:22:d0:b8:78:ae  
          BROADCAST MULTICAST  MTU:1500  Metric:1
          RX packets:0 errors:0 dropped:0 overruns:0 frame:0
          TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:500 
          RX bytes:0 (0.0 B)  TX bytes:0 (0.0 B)
}}}

''3. 檢視 TAP 網卡硬體規格''
{{{
$ sudo lshw -class network | sed -n '/tap/,/driver=tun/p'
       logical name: tap0 
       serial: 4c:22:d0:b8:78:ae
       size: 10Mbit/s
       capabilities: ethernet physical
       configuration: autonegotiation=off broadcast=yes driver=tun driverversion=1.6 duplex=full firmware=N/A link=no multicast=yes port=twisted pair speed=10Mbit/s
}}}

''4. 移除 TAP 網路裝置''
{{{
$ tunctl -d tap0
Set 'tap0' nonpersistent
}}}

{{item1{Q & A}}}
''1. What is the difference between TUN driver and TAP driver ?''
{{{
TUN works with IP frames. TAP works with Ethernet frames.

This means that you have to read/write IP packets when you are using tun and
ethernet frames when using tap.
}}}
<<toBalaNotes "1">>
{{item1{建置 Tap+SLiRP 網路}}}

{{op1{設定 Gateway 虛擬電腦}}}

''@@color:blue;[HOST OS]@@''
''1. 產生 student 帳號專屬的 TAP 網卡 (必須使用 sudo 命令)''
{{{
$ sudo tunctl -u student
Set 'tap0' persistent and owned by uid 1000
}}}

''2. 設定 Tap0 網卡的 IP 位址 (必須使用 sudo 命令)''
{{{
$ sudo ifconfig tap0 172.16.30.254 up
}}}

''3. 啟動虛擬電腦''
執行以下命令, 不須使用 sudo 命令, 因 Tap0 網卡已授權 student 帳號使用. 多增加一片負責 SLIRP 連接的網卡, 並設定 8012 埠, 等待連接 client 虛擬電腦
{{{
$ kvm -name gateway -m 128 -kernel kernel/vmlinuz -initrd kernel/core.gz -net nic -net tap,ifname=tap0,script=no,downscript=no -net socket,listen=:"8012" 
}}}

''@@color:blue;[Gateway 虛擬電腦]@@''
''1. 設定 Gateway 虛擬電腦''
{{{
$ sudo ifconfig eth0 172.16.30.5 netmask 255.255.255.0 up

$ sudo route add default gw 172.16.30.254

$ sudo sh -c "echo 'nameserver 168.95.1.1' > /etc/resolv.conf"
}}}
<<toBalaNotes "2">>
{{op1{設定 Client 虛擬電腦}}}

''@@color:blue;[HOST OS]@@''
''1. 啟動第一部 Client 虛擬電腦 (Client1)''
{{{
$ kvm -name "client1" -m 128 -kernel kernel/vmlinuz -initrd kernel/core.gz -net nic,macaddr="52:54:00:12:34:13" -net socket,connect=127.0.0.1:8012 &
}}}

''@@color:blue;[Client1 虛擬電腦]@@''
''2. 設定 Client1 虛擬電腦''
{{{
$ sudo ifconfig eth0 172.16.30.13 netmask 255.255.255.0 up

$ sudo route add default gw 172.16.30.254
}}}

''@@color:blue;[HOST OS]@@''
''1. 啟動第二部 Client 虛擬電腦 (Client2)''
{{{
$ kvm -name "client2" -m 128 -kernel kernel/vmlinuz -initrd kernel/core.gz -net nic,macaddr="52:54:00:12:34:14" -net socket,connect=127.0.0.1:8012 &
}}}

''@@color:blue;[Client2 虛擬電腦]@@''
''2. 設定 Client2 虛擬電腦''
{{{
$ sudo ifconfig eth0 172.16.30.14 netmask 255.255.255.0 up

$ sudo route add default gw 172.16.30.254
}}}

''@@color:blue;[HOST OS]@@''
{{op1{上網設定}}}

''檢視是否開啟 IPv4 forwarding 功能''
{{{
$ cat /proc/sys/net/ipv4/ip_forward
0

#如果上式執行結果是 0, 請執行以下命令, 開啟 IPv4 forwarding 功能
$ sudo bash -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
}}}

''啟動 NAT''
{{{
$ sudo iptables -t nat -A POSTROUTING -s 172.16.10.0/24 -j MASQUERADE
}}}
''[註]'' 可以不指定對外網路介面 (-o eth0)

''刪除 NAT 設定''
{{{
$ sudo iptables -t nat -L -n --line-numbers
Chain PREROUTING (policy ACCEPT)
num  target     prot opt source               destination         

Chain INPUT (policy ACCEPT)
num  target     prot opt source               destination         

Chain OUTPUT (policy ACCEPT)
num  target     prot opt source               destination         

Chain POSTROUTING (policy ACCEPT)
num  target     prot opt source               destination         
1    MASQUERADE  tcp  --  192.168.122.0/24    !192.168.122.0/24     masq ports: 1024-65535
2    MASQUERADE  udp  --  192.168.122.0/24    !192.168.122.0/24     masq ports: 1024-65535
3    MASQUERADE  all  --  192.168.122.0/24    !192.168.122.0/24    
4    MASQUERADE  all  --  172.16.10.0/24       0.0.0.0/0      
     
$ sudo iptables -t nat -D POSTROUTING 4

$ sudo iptables -t nat -L -n --line-numbers
Chain PREROUTING (policy ACCEPT)
num  target     prot opt source               destination         

Chain INPUT (policy ACCEPT)
num  target     prot opt source               destination         

Chain OUTPUT (policy ACCEPT)
num  target     prot opt source               destination         

Chain POSTROUTING (policy ACCEPT)
num  target     prot opt source               destination         
1    MASQUERADE  tcp  --  192.168.122.0/24    !192.168.122.0/24     masq ports: 1024-65535
2    MASQUERADE  udp  --  192.168.122.0/24    !192.168.122.0/24     masq ports: 1024-65535
3    MASQUERADE  all  --  192.168.122.0/24    !192.168.122.0/24    
}}}
<<toBalaNotes "3">>
{{item1{以文字模式啟動虛擬電腦}}}
{{{
-nographic : Normally, QEMU uses SDL to display the VGA output. With this option, you can totally disable graphical output so that QEMU is a simple command line application. The emulated serial port is redirected on the console. Therefore, you can still use QEMU to debug a Linux kernel with a serial console.

-curses : Normally, QEMU uses SDL to display the VGA output. With this option, QEMU can display the VGA output when in text mode using a curses/ncurses interface. Nothing is displayed in graphical mode.
}}}

{{item1{Simple DirectMedia Layer (SDL)}}}
维基百科 : http://zh.wikipedia.org/wiki/SDL
{{{
SDL(Simple DirectMedia Layer)是一套開放原始碼的跨平台多媒體開發函式庫,使用C語言寫成。SDL提供了數種控制圖像、聲音、輸出入的函式,讓開發者只要用相同或是相似的程式碼就可以開發出跨多個平台(Linux、Windows、Mac OS X等)的應用軟體。目前SDL多用於開發遊戲、模擬器、媒體播放器等多媒體應用領域。

SDL使用GNU寬通用公共許可證為授權方式,意指動態連結(dynamic link)其函式庫並不需要開放本身的原始碼。因此諸如《雷神之鎚4》(Quake 4)等商業遊戲也使用SDL來開發。
}}}


///%1
//%/

///%2
//%/

///%3
//%/


{{item1{虛擬教學模組管理}}}

在指定目錄,  會自動產生教學模組的介面檔 (Lab101, Lab201,...), 在 Lab101 介面檔輸入 "start" 字串, 代表啟動 Lab101 所有虛擬電腦

{{item1{核心系統更新}}}

使用 kvm-nbd 命令, 直接掛載虛擬電腦硬碟檔, 然後置換 Linux 系統核心檔

{{item1{分析系統設定}}}

使用 kvm-nbd 命令, 直接掛載虛擬電腦硬碟檔, 然後取出系統設定資訊, 進行分析

{{item1{備份資訊}}}

使用 kvm-nbd 命令, 直接掛載虛擬電腦硬碟檔, 然後備份系統資訊

<<toBalaNotes "1">>


///%1
//%/
The NameNode stores modifications to the file system as a log appended to a native file system file (''edits''). When a NameNode starts up, it reads HDFS state from an image file (''fsimage'') and then applies edits from the edits log file. It then writes new HDFS state to the fsimage and starts normal operation with an empty edits file. Since NameNode merges fsimage and edits files only during start up, the edits log file could get very large over time on a busy cluster. Another side effect of a larger edits file is that next restart of NameNode takes longer.

''The secondary NameNode merges the fsimage and the edits log files periodically and keeps edits log size within a limit. It is usually run on a different machine than the primary NameNode since its memory requirements are on the same order as the primary NameNode. The secondary NameNode is started by bin/start-dfs.sh on the nodes specified in @@color:blue;conf/masters@@ file.''

The start of the checkpoint process on the secondary NameNode is controlled by two configuration parameters.

''fs.checkpoint.period'', set to 1 hour by default, specifies the maximum delay between two consecutive checkpoints, and
''fs.checkpoint.size'', set to 64MB by default, defines the size of the edits log file that forces an urgent checkpoint even if the maximum checkpoint delay is not reached.

The secondary NameNode stores the latest checkpoint in a directory which is structured the same way as the primary NameNode's directory. So that the check pointed image is always ready to be read by the primary NameNode if necessary.

The latest checkpoint can be imported to the primary NameNode if all other copies of the image and the edits files are lost. In order to do that one should:

Create an empty directory specified in the dfs.name.dir configuration variable; 
Specify the location of the checkpoint directory in the configuration variable fs.checkpoint.dir;
and start the NameNode with -importCheckpoint option.

The NameNode will upload the checkpoint from the fs.checkpoint.dir directory and then save it to the NameNode directory(s) set in dfs.name.dir. The NameNode will fail if a legal image is contained in dfs.name.dir. The NameNode verifies that the image in fs.checkpoint.dir is consistent, but does not modify it in any way. 

{{item1{NameNode : HDP120}}}
''1. 登入 HDP120''
{{{
$ sudo virsh console HDP120
Connected to domain HDP120
Escape character is ^]

}}}

''2. 修改 /etc/hosts 檔案''
請加入 "192.168.100.30  HDP130" 這行資料
{{{
$ nano /etc/hosts
127.0.0.1       localhost
192.168.100.20  HDP120
192.168.100.21  HDP121
192.168.100.22  HDP122
192.168.100.30  HDP130
                     :
}}}

''3. 將 /etc/hosts 複製給 HDP130''
{{{
# scp /etc/hosts root@192.168.100.30:/etc/hosts
The authenticity of host '192.168.100.30 (192.168.100.30)' can't be established.
RSA key fingerprint is 8d:b7:99:60:7f:39:05:b5:09:5f:ed:a4:af:27:cb:46.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added '192.168.100.30' (RSA) to the list of known hosts.
root@192.168.100.30's password: 
hosts                                         100%  306     0.3KB/s   00:00  

}}}

''4. 將 HDP120 的 SSH Server 憑證, 複製給 HDP130''
{{{
# scp ~/.ssh/id_dsa.pub root@HDP130:/root/.ssh/authorized_keys
The authenticity of host 'hdp130 (192.168.100.30)' can't be established.
RSA key fingerprint is 8d:b7:99:60:7f:39:05:b5:09:5f:ed:a4:af:27:cb:46.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'hdp130' (RSA) to the list of known hosts.
root@hdp130's password: 
id_dsa.pub                                    100%  601     0.6KB/s   00:00  
}}}

''5. 指定 HDP130 為 Secondary NameNode''
masters 這個檔案, 用來指定 Secondary NameNode, 而不是 NameNode
{{{
# cd /mnt/hda1/hadoop-1.0.3
# nano conf/masters
HDP130
}}}

''6. 登入 HDP130, 儲存新複製的檔案''
{{{
# ssh HDP130

# filetool.sh -b
Backing up files to /mnt/hda1/tce/mydata.tgz 

# exit
}}}

''7. 宣告 Secondary NameNode 的資料傳送 Port''
在 hdfs-site.xml 這設定檔宣告, 如下 :
{{{
$ nano conf/hdfs-site.xml
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<!-- Put site-specific property overrides in this file. -->

<configuration>

     <property>
         <name>dfs.http.address</name>
         <value>HDP120:50070</value>
     </property> 

     <property>  
         <name>dfs.secondary.http.address</name>  
         <value>HDP130:50090</value>  
     </property>  

     <property>
         <name>dfs.replication</name>
         <value>2</value>
     </property>

</configuration>
}}}

''8. 脫離 HDP120 控制台''
{{{
請按 Ctrl + ] 
}}}
<<toBalaNotes "1">>
{{item1{SecondaryNameNode : HDP130}}}

''1. 登入 HDP130''
{{{
$ sudo virsh console HDP130
Connected to domain HDP130
Escape character is ^]

Micro Core Linux 3.8.2
HDP130 login: root
Password: 
}}}
''[註]'' 登入帳號 root, 密碼為 student

''2. 建立資料目錄''
{{{
$ cd /mnt/hda1/hadoop-1.0.3

$ mkdir data
}}}

''3. 宣告 NameNode 的資料傳送 Port''
在 hdfs-site.xml 這設定檔宣告, 如下 :
{{{
$ nano conf/hdfs-site.xml
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<!-- Put site-specific property overrides in this file. -->

<configuration>

     <property>
         <name>dfs.http.address</name>
         <value>HDP120:50070</value>
     </property> 

     <property>  
         <name>dfs.secondary.http.address</name>  
         <value>HDP130:50090</value>  
     </property>  

     <property>
         <name>dfs.replication</name>
         <value>2</value>
     </property>

</configuration>
}}}

''4. 脫離 HDP130 控制台''
{{{
請按 Ctrl + ] 
}}}

<<toBalaNotes "2">>
{{item1{開始測試}}}
''1. 登入 NameNode (HDP120)''
{{{
$ sudo virsh console HDP120
}}}

''2. 啟動 HDFS 系統''
{{{
$ start-dfs.sh 
}}}

''3. 在 HDP120 系統, 登入 Secondary NameNode (HDP130)''
{{{
$ ssh HDP130
}}}

''4. 檢視目錄運作資訊''
SecondaryNameNode 根據 "fs.checkpoint.period"  的設定, 定時會去整合 NameNode 的 fsimage, 整合資料會在以下目錄出現
{{{
# tree -h /mnt/hda1/hadoop-1.0.3/data/
/mnt/hda1/hadoop-1.0.3/data/
`-- [4.0K]  dfs
    `-- [4.0K]  namesecondary
        |-- [4.0K]  current
        |   |-- [ 101]  VERSION
        |   |-- [   4]  edits
        |   |-- [ 543]  fsimage
        |   `-- [   8]  fstime
        |-- [4.0K]  image
        |   `-- [ 157]  fsimage
        |-- [   0]  in_use.lock
        `-- [4.0K]  previous.checkpoint
            |-- [ 101]  VERSION
            |-- [   4]  edits
            |-- [ 543]  fsimage
            `-- [   8]  fstime
}}}

''5. 檢視連接運作資訊''
在以下 Log 資訊中, 得知 NameNode 會打開 50070 Port, 給 SecondaryNameNode 下載需整合的資料, 而 SecondaryNameNode 會打開 50090 Port, 給 NameNode 下載整合好的 fsimage 檔案
{{{
# tail /mnt/hda1/hadoop-1.0.3/logs/hadoop-root-secondarynamenode-HDP130.log 
2012-08-02 14:10:27,740 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem: isAccessTokenEnabled=false accessKeyUpdateInterval=0 min(s), accessTokenLifetime=0 min(s)
2012-08-02 14:10:27,757 INFO org.apache.hadoop.hdfs.server.namenode.NameNode: Caching file names occuring more than 10 times 
2012-08-02 14:10:28,151 INFO org.apache.hadoop.hdfs.server.common.Storage: Number of files = 6
2012-08-02 14:10:28,158 INFO org.apache.hadoop.hdfs.server.common.Storage: Number of files under construction = 0
2012-08-02 14:10:28,159 INFO org.apache.hadoop.hdfs.server.common.Storage: Edits file /mnt/hda1/hadoop-1.0.3/data/dfs/namesecondary/current/edits of size 4 edits # 0 loaded in 0 seconds.
2012-08-02 14:10:28,160 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem: Number of transactions: 0 Total time for transactions(ms): 0Number of transactions batched in Syncs: 0 Number of syncs: 0 SyncTimes(ms): 0 
2012-08-02 14:10:29,036 INFO org.apache.hadoop.hdfs.server.common.Storage: Image file of size 543 saved in 0 seconds.
2012-08-02 14:10:29,330 INFO org.apache.hadoop.hdfs.server.common.Storage: Image file of size 543 saved in 0 seconds.
2012-08-02 14:10:29,612 INFO org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode: Posted URL HDP120:50070putimage=1&port=50090&machine=HDP130&token=-32:1266403051:0:1343916628000:1343916324578
2012-08-02 14:10:29,902 INFO org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode: Checkpoint done. New Image Size: 543
}}}
<<toBalaNotes "3">>


///%1
//%/

///%2
//%/

///%3
//%/
''參考文章''
1. 認識邊緣網路架構 VEB、VN-link、VEPA技術介紹 (一定要看)
http://www.netadmin.com.tw/article_content.aspx?sn=1112070005
2. A nice overview of MacVTap (without boring details about not doing what you would expect)
http://virt.kernelnewbies.org/MacVTap
3.  Configuring a Bridge in Linux (有片段文字, 說明為何產生 Bridge 會多一張網卡)
http://www.6test.edu.cn/~lujx/linux_networking/0131777203_ch12lev1sec3.html
4. Kernel Korner - Linux as an Ethernet Bridge
http://www.linuxjournal.com/article/8172
5. ebtables (根據L2的資訊去過濾封包)
http://ebtables.sourceforge.net/
6. Linux Bridge With ‘brctl’ Tutorial
http://www.lainoox.com/bridge-brctl-tutorial-linux/
7.  [原創] Linux Bridge 下 清除 MAC Address Table
http://binaryhacking.blogspot.tw/2008/02/linux-bridge-mac-address-table.html
8. Set Up The Bridge
http://www.linuxdoc.org/HOWTO/BRIDGE-STP-HOWTO/set-up-the-bridge.html

{{item1{認識 Bridge}}}
Bridge 簡單來說,就是在電腦網路中,封包交換的技術,與 Router 不一樣的地方,在於 bridge 預設並不知道特定裝置的所在地,而是利用 flooding 的方式來廣播封包,等到查詢的裝置回應之後,就會記錄裝置的 MAC address,以避免再次傳送大量封包來尋找。

而 Bridge 在最開始的時候,收到要交換轉送的封包時,會去檢查封包中 MAC address 的資訊,並根據這個 MAC address 去查詢自己的轉送表(forwarding table),而此資料庫存放著 MAC address 與對應出入埠的資訊,這個資料庫內的資訊是怎樣建立的呢?其實是學習而來的。

舉例來說,現在有三台電腦(Host),A、B、C 與一組 bridge,而此 bridge 有三個連接埠port,而 A、B 與 C 則分別連結到其中一個的 bridge port,而當 A 要傳送訊息到 B 的時候,bridge會檢查這個來源位置,並且與所連結的 bridge port number 的資訊,一併記錄在轉送表中,接著會去檢查目的位置的資訊是否存在轉送表內,如果沒有,那就會利用 flooding 的方式來廣播封包到其他的 bridge port,而這樣的方式,也就是大家所熟悉的廣播風暴(Broadcast)。

封包傳送到 Host B 與 C 之後,Host C 會直接忽視並丟棄此封包,而 Host B 辨識此目的位置是自己之後,就會送出回應訊息給 Host A,而 bridge 在轉送回應訊息封包的時候,就會將 Host B 與對應的連接埠,以及與 Host A 的連線資訊記錄在轉送表之中,之後 Host A 與 Host B 之間的連線,就會直接建立,bridge 也不會在傳送廣播封包給其他的 Host,而這也就是 bridge 學習與建立轉送表的過程。

''※ 區域網路的保護機制:Spanning-Tree Protocol''

然而,相信大家都知道,在區域網路內,廣播風暴是很佔頻寬的,且在區域網路內,也蠻容易因為某些因素而產生網路迴路(迴圈),所以為了避免發生這樣的狀況,則必須有一個保護與協調的機制,而這就是 Spanning-Tree Protocol(STP)。

Spanning-Tree Protocol(STP)是一種Level 2的網路通訊協定,主要作用在 Bridge(橋接器)或是 Switch(交換器)上,其最主要的目的,是當使用 Bridge 或 Switch 連結成網路時,來避免因為 Redundant Path(額外路徑或冗餘路徑)的機制,而造成網路迴路(loop)的狀況,以確保兩個節點(連接點)之間只有一條聯繫路徑存在。所以當 Bridge 或 Switch 有使用 STP 協定的時候,就能提供一個無迴圈的網路環境,也可以避免廣播封包情形的發生,保持區域網路的暢通。

[img[img/bridge.gif]]

{{item1{產生橋接網路裝置}}}
{{{
$ sudo brctl addbr br01
}}}

''brctl'' : 產生橋接網路裝置的指令
''addbr'' : 產生橋接網路裝置,後方接著為裝置名稱,如 br01

{{item1{查看橋接網路裝置 (br01) 的資訊}}}
{{{
$ brctl show br01
bridge name	bridge id		STP enabled	interfaces
br01		8000.000000000000	no	
}}}

''[註]'' 新建橋接網路裝置 br01, 內定沒啟動 STP 功能, "bridge id" 中的 8000 是橋接器的優先權 (priority) 值, 而句點 (.) 後面是橋接裝置的 MAC 位址, 內定是 00:00:00:00:00:00  

{{item1{橋接網路裝置的內部網卡 (網卡名稱與橋接網路裝置同名)}}}
{{{
$ ifconfig br01
br01      Link encap:Ethernet  HWaddr 16:57:a5:6c:d2:b6  
          BROADCAST MULTICAST  MTU:1500  Metric:1
          RX packets:0 errors:0 dropped:0 overruns:0 frame:0
          TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:0 
          RX bytes:0 (0.0 B)  TX bytes:0 (0.0 B)	
}}}

''[註]'' 內部網卡主要作為橋接網路裝置所建立網段封包管理之用. 如內部網卡沒啟動, 橋接網路裝置可不可以使用 ?  

{{item1{移除橋接網路裝置}}}
{{{
$ sudo brctl delbr br01
}}}
<<toBalaNotes "1">>
{{item1{設定橋接網路}}}
''1. 產生橋接網路裝置''
{{{
$ sudo brctl addbr swhub

$ ifconfig swhub
swhub     Link encap:Ethernet  HWaddr 2a:3f:01:f8:2b:e6  
          BROADCAST MULTICAST  MTU:1500  Metric:1
          RX packets:0 errors:0 dropped:0 overruns:0 frame:0
          TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:0 
          RX bytes:0 (0.0 B)  TX bytes:0 (0.0 B)

}}}

''2. 設定橋接網路裝置的 IP''
{{{
$ sudo ifconfig swhub 172.16.20.254 netmask 255.255.255.0 up

$ ifconfig swhub
swhub     Link encap:Ethernet  HWaddr 2a:3f:01:f8:2b:e6  
          inet addr:172.16.20.254  Bcast:172.16.255.255  Mask:255.255.255.0
          inet6 addr: fe80::283f:1ff:fef8:2be6/64 Scope:Link
          UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
          RX packets:0 errors:0 dropped:0 overruns:0 frame:0
          TX packets:26 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:0 
          RX bytes:0 (0.0 B)  TX bytes:6005 (6.0 KB)
}}}

<<toBalaNotes "2">>
{{item1{設定 TAP 網路介面}}}

''1. 產生二個 TAP 網路介面''
{{{
$ sudo tunctl -b -u student
tap0

$ sudo tunctl -b -u student
tap1
}}}

''2. 啟動新建 TAP 網路裝置''
{{{
$ sudo ifconfig tap0 up
$ ifconfig tap0
tap0      Link encap:Ethernet  HWaddr 06:1d:10:90:0c:29  
          UP BROADCAST MULTICAST  MTU:1500  Metric:1
          RX packets:0 errors:0 dropped:0 overruns:0 frame:0
          TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:500 
          RX bytes:0 (0.0 B)  TX bytes:0 (0.0 B)

$ sudo ifconfig tap1 up
$ ifconfig tap1
tap1      Link encap:Ethernet  HWaddr 6e:5f:65:68:41:48  
          UP BROADCAST MULTICAST  MTU:1500  Metric:1
          RX packets:0 errors:0 dropped:0 overruns:0 frame:0
          TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:500 
          RX bytes:0 (0.0 B)  TX bytes:0 (0.0 B)
}}}

{{item1{設定 Switch-HUB 網路裝置}}}
''1. 將 TAP 網路介面, 連接至橋接網路裝置''
{{{
$ sudo brctl addif swhub tap1
$ sudo brctl addif swhub tap0
}}}

''2. 檢視 swhub 橋接網路裝置的 MAC 表格''
{{{
$ sudo brctl showmacs swhub
port no	mac addr		is local?	ageing timer
  2	06:1d:10:90:0c:29	yes		   0.00
  1	6e:5f:65:68:41:48	yes		   0.00
}}}

swhub 橋接網路裝置, 會根據加入網卡裝置, 選其最小的 MAC 位址, 作為本身的 MAC 位址, 內容如下 :
{{{
$ ifconfig swhub
swhub     Link encap:Ethernet  HWaddr 06:1d:10:90:0c:29  
          inet addr:172.16.20.254  Bcast:172.16.255.255  Mask:255.255.0.0
          inet6 addr: fe80::384f:a4ff:fec7:5799/64 Scope:Link
          UP BROADCAST MULTICAST  MTU:1500  Metric:1
          RX packets:0 errors:0 dropped:0 overruns:0 frame:0
          TX packets:56 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:0 
          RX bytes:0 (0.0 B)  TX bytes:10545 (10.5 KB)
}}}

''3. 檢視 swhub 內部運作資訊''
{{{
$ sudo brctl showstp swhub
swhub
 bridge id		8000.061d10900c29
 designated root	8000.061d10900c29
 root port		   0			path cost		   0
 max age		  20.00			bridge max age		  20.00
 hello time		   2.00			bridge hello time	   2.00
 forward delay		  15.00			bridge forward delay	  15.00
 ageing time		 300.01
 hello timer		   0.36			tcn timer		   0.00
 topology change timer	   0.00			gc timer		  32.29
 flags			

tap0 (2)
 port id		8002			state		       disabled
 designated root	8000.061d10900c29	path cost		 100
 designated bridge	8000.061d10900c29	message age timer	   0.00
 designated port	8002			forward delay timer	   0.00
 designated cost	   0			hold timer		   0.00
 flags			

tap1 (1)
 port id		8001			state		       disabled
 designated root	8000.061d10900c29	path cost		 100
 designated bridge	8000.061d10900c29	message age timer	   0.00
 designated port	8001			forward delay timer	   0.00
 designated cost	   0			hold timer		   0.00
 flags			
}}}

上面的資訊在其它 Linux 系統, 可由以下命令得知
{{{
$ sudo brctl showbr swhub
}}}
''[註]'' Ubuntu 12.04 的 brctl 命令不提供 showbr 這參數

{{item1{顯示所有橋接網路裝置}}}
{{{
$ brctl show
bridge name	bridge id		STP enabled	interfaces
NET100		8000.525400be89e7	no		NET100-nic
NET99		8000.525400da18de	no		NET99-nic
swhub		8000.061d10900c29	no		tap0
							tap1
vbr660		8000.525400aa0660	no		vbr660-nic
vbr661		8000.525400aa0661	no		vbr661-nic
vbr88		8000.525400aa0088	no		vbr88-nic
virbr0		8000.fe5400a08dac	yes		vnet0
}}}

{{item1{啟動虛擬電腦}}}

''1. 第一部 Client 虛擬電腦''
{{{
$ cd ~/iLXC/

$ kvm -name "tsc321" -m 128 -kernel kernel/vmlinuz -initrd kernel/tsc32.gz -net nic,macaddr=52:54:72:16:20:10 -net tap,ifname=tap0,script=no,downscript=no -append "ipv4=172.16.20.10:255.255.255.0:172.16.20.254:168.95.1.1" &
}}}

''2. 第二部 Client 虛擬電腦''
{{{
$ kvm -name "tsc322" -m 128 -kernel kernel/vmlinuz -initrd kernel/tsc32.gz -net nic,macaddr=52:54:72:16:20:11 -net tap,ifname=tap1,script=no,downscript=no -append "ipv4=172.16.20.11:255.255.255.0:172.16.20.254:168.95.1.1" &
}}}

{{item1{測通網路}}}

''1. 在 HOST OS 執行 ping 命令''
{{{
$ ping 172.16.20.10

$ ping 172.16.20.11

$ arp -n
Address                  HWtype  HWaddress           Flags Mask            Iface
172.16.20.11             ether   52:54:72:16:20:11   C                     swhub
172.16.20.10             ether   52:54:72:16:20:10   C                     swhub
}}}

{{item1{上網設定}}}
{{{
$ sudo iptables -t nat -A POSTROUTING -s 172.16.20.0/24 -j MASQUERADE
}}}
<<toBalaNotes "3">>

{{item1{建立橋接網路裝置的專屬網卡 (意在指定專屬 MAC 位址)}}}

''1. 建立 TAP 網卡''
{{{
$ sudo tunctl -u student -t shub66-net
Set 'shub66-net' persistent and owned by uid 1000

# 新建 TAP 網卡 (shub66-net), 內定沒有 IP, 而且沒有啟動
$ ifconfig shub66-net
shub66-net Link encap:Ethernet  HWaddr da:e5:54:85:63:5a  
          BROADCAST MULTICAST  MTU:1500  Metric:1
          RX packets:0 errors:0 dropped:0 overruns:0 frame:0
          TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:500 
          RX bytes:0 (0.0 B)  TX bytes:0 (0.0 B)
}}}

''2. 指定新建 TAP 網卡的 MAC 位址''
所指定的 MAC 位址, 數字要小, 因橋接網路裝置, 會根據加入網卡裝置, 選其最小的 MAC 位址, 作為本身的 MAC 位址
{{{
$ sudo ifconfig shub66-net hw ether 52:54:00:00:00:66
$ ifconfig shub66-net
shub66-net Link encap:Ethernet  HWaddr 52:54:00:00:00:66  
          BROADCAST MULTICAST  MTU:1500  Metric:1
          RX packets:0 errors:0 dropped:0 overruns:0 frame:0
          TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:500 
          RX bytes:0 (0.0 B)  TX bytes:0 (0.0 B)
}}}

''3. 建立橋接網路裝置''
{{{
$ sudo brctl addbr shub66
$ ifconfig shub66
shub66    Link encap:Ethernet  HWaddr b2:d8:81:d9:53:69  
          BROADCAST MULTICAST  MTU:1500  Metric:1
          RX packets:0 errors:0 dropped:0 overruns:0 frame:0
          TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:0 
          RX bytes:0 (0.0 B)  TX bytes:0 (0.0 B)
}}}

''4. 橋接網路裝置加入專屬網卡''
{{{
$ sudo brctl addif shub66 shub66-net
$ ifconfig shub66
shub66    Link encap:Ethernet  HWaddr 52:54:00:00:00:66  
          BROADCAST MULTICAST  MTU:1500  Metric:1
          RX packets:0 errors:0 dropped:0 overruns:0 frame:0
          TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:0 
          RX bytes:0 (0.0 B)  TX bytes:0 (0.0 B)
}}}

''5. 指定橋接網路裝置的 IP, 並啟動''
橋接網路裝置的專屬 TAP 網卡不需要啟動
{{{
$ sudo ifconfig shub66 172.16.20.253/24 up
$ ifconfig shub66
shub66    Link encap:Ethernet  HWaddr 52:54:00:00:00:66  
          inet addr:172.16.20.253  Bcast:172.16.20.255  Mask:255.255.255.0
          UP BROADCAST MULTICAST  MTU:1500  Metric:1
          RX packets:0 errors:0 dropped:0 overruns:0 frame:0
          TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:0 
          RX bytes:0 (0.0 B)  TX bytes:0 (0.0 B)
}}}

''6. 加入新建虛擬電腦專屬 TAP 網卡, 並解決 MAC 問題''
{{{
# 新建虛擬電腦專屬 TAP 網卡
$ sudo tunctl -u student 
Set 'tap0' persistent and owned by uid 1000

# 新建虛擬電腦專屬 tap0 網卡的 MAC 位址, 竟然比 shub66-net 的 MAC 位址來的小
# TAP 網卡的 MAC 位址是由亂數產生
$ ifconfig tap0
tap0      Link encap:Ethernet  HWaddr 16:e8:54:e3:21:73  
          BROADCAST MULTICAST  MTU:1500  Metric:1
          RX packets:0 errors:0 dropped:0 overruns:0 frame:0
          TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:500 
          RX bytes:0 (0.0 B)  TX bytes:0 (0.0 B)

# 將新建虛擬電腦專屬 tap0 網卡, 加入橋接網路裝置後, 果真改變橋接網路裝置 (shub66) 的 MAC 位址
$ sudo brctl addif shub66 tap0
$ ifconfig shub66
shub66    Link encap:Ethernet  HWaddr 16:e8:54:e3:21:73  
          inet addr:172.16.20.253  Bcast:172.16.20.255  Mask:255.255.255.0
          UP BROADCAST MULTICAST  MTU:1500  Metric:1
          RX packets:0 errors:0 dropped:0 overruns:0 frame:0
          TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:0 
          RX bytes:0 (0.0 B)  TX bytes:0 (0.0 B)

# 因上述的 MAC 問題, 解決之道就是改變 tap0 網卡的 MAC 位址, 將它改小
$ sudo ifconfig tap0 hw ether 52:54:72:16:20:13 

# 記得一定要啟動 tap0 這張網卡
$ sudo ifconfig tap0 up

# 再次檢視 shub66 的 MAC 位址
$ ifconfig shub66
shub66    Link encap:Ethernet  HWaddr 52:54:00:00:00:66  
          inet addr:172.16.20.253  Bcast:172.16.20.255  Mask:255.255.255.0
          UP BROADCAST MULTICAST  MTU:1500  Metric:1
          RX packets:0 errors:0 dropped:0 overruns:0 frame:0
          TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:0 
          RX bytes:0 (0.0 B)  TX bytes:0 (0.0 B)
}}}

''7. 測試橋接網路裝置''
{{{
$ kvm -name "tsc321" -m 128 -kernel kernel/vmlinuz64 -initrd kernel/tsc64.gz -net nic,macaddr=52:54:72:16:20:10 -net tap,ifname=tap0,script=no,downscript=no -append "ipv4=172.16.20.10:255.255.255.0:172.16.20.254:168.95.1.1" &
}}}
<<toBalaNotes "4">>



///%1
//%/

///%2
//%/

///%3
//%/

///%4
//%/
''參考網址''
1. Running PHP under CGI with Apache on Ubuntu 10.04 LTS
http://library.linode.com/web-servers/apache/php-cgi/ubuntu-10.04-lucid

{{item1{何謂 CGI 程式}}}
Web CGI programs can be written in any language which can process standard input (stdin), environment variables and write to standard output (stdout). The web server will interact with all CGI programs using the "Common Gateway Interface" (CGI) standard as set by RFC 3875. This capability is possessed by most modern computer programming and scripting languages, including the ''bash script''. 

{{item1{CGI 程式運作}}}
1. All CGI scripts must write out a header used by the browser to identify the content.
2. They typically process some input. (URL, form data or ISINDEX)
3. CGI can access environment variables set by the web server.
4. CGI scripts will write out HTML content to be viewed. This typically has the structure of the "head" which contains non-viewable content and "body" which provides the viewable content.

@@color:red;''[註] Apache 內定 CGI 目錄為 /usr/lib/cgi-bin''@@

{{item1{建置使用者 CGI 程式介面}}}
''1. 修改 /etc/apache2/httpd.conf 設定檔''
{{{
$ sudo nano /etc/apache2/httpd.conf
                       :
                       :
<Directory /home/*/www/cgi-bin>
  AllowOverride None
  Options ExecCGI -MultiViews +SymLinksIfOwnerMatch
  SetHandler cgi-script
  Order allow,deny
  Allow from all
</Directory>
}}}

''2. 建立 cgi-bin 目錄''
{{{
$ cd ~
$ cd www
$ mkdir cgi-bin
}}}

''3. 編寫 cgi 程式''
{{{
$ nano test.cgi
#!/bin/bash
# Minimal CGI test page: prints which account actually runs the script.

# CGI header: content type, then the mandatory blank separator line.
echo Content-type: text/html
echo ""
echo "<html>"
echo "<body>"
echo "<h1>CGI Test</h1>"
# Shows the effective user; under Apache this prints www-data (see output below).
/usr/bin/whoami
echo "</body>"
echo "</html>"
}}}

''4. 賦予執行權限''
{{{
$ chmod +x test.cgi
}}}

''5. 重新啟動 Apache2''
{{{
$ sudo /etc/init.d/apache2 restart
}}}

''6. 執行 test.cgi''

在另一部電腦, 啟動瀏覽器, 然後輸入以下網址 :
{{{
http://your ip/~student/cgi-bin/test.cgi
}}}

''[註]'' 上面的 URL 中, 必須輸入 ''~'' 字元, 執行 CGI 程式, 無法使用連接檔 (ln -s)

''執行結果''
{{{
CGI Test
www-data 
}}}

由執行結果得知, 真正執行 test.cgi 的使用者帳號是 www-data

{{item1{檢視  /etc/apache2/envvars 檔案 (檢視執行 CGI 程式的 User 及 Group)}}}
{{{
$ sudo nano /etc/apache2/envvars
# envvars - default environment variables for apache2ctl

# Since there is no sane way to get the parsed apache2 config in scripts, some
# settings are defined via environment variables and then used in apache2ctl,
# /etc/init.d/apache2, /etc/logrotate.d/apache2, etc.
export APACHE_RUN_USER=www-data                                          # 執行 CGI 程式的 User 
export APACHE_RUN_GROUP=www-data                                       # 執行 CGI 程式的 Group
export APACHE_PID_FILE=/var/run/apache2.pid

## The locale used by some modules like mod_dav
export LANG=C
## Uncomment the following line to use the system default locale instead:
#. /etc/default/locale

export LANG

## The command to get the status for 'apache2ctl status'.
## Some packages providing 'www-browser' need '--dump' instead of '-dump'.
#export APACHE_LYNX='www-browser -dump'
                                :
}}}
<<toBalaNotes "1">>



///%1
//%/
''參考文章''
1. Bash shell CGI
http://www.yolinux.com/TUTORIALS/BashShellCgi.html
2. Apache CGI 程式環境建立和資源程式應用
http://mouse.oit.edu.tw/htdocs/Hope/199901/CONTENT/CGI.HTM
3. 15 Practical Linux cURL Command Examples (一定要看) 
http://www.thegeekstuff.com/2012/04/curl-examples/

{{item1{檢視 Request/Response 封包內容}}}

''Request/Response 封包標頭內容''

1. 啟動 Firefox 瀏覽器, 並啟動 Firebug Plugin 的 ''網路'' 功能

[img[img/firebug/firebug01.png]]

[img[img/firebug/firebug02.png]]

2. 輸入以下 URL 
{{{
http://tobala.net/cgi-bin/x/ajaxreq.sh
}}}

[img[img/firebug/firebug03.png]]

''回應網頁內容''
{{{
SERVER_SIGNATURE=
Apache/2.2.16 (CentOS) mod_ssl/2.2.16 0.9.8l DAV/2 mod_fcgid/2.3.5 mod_auth_passthrough/2.1 FrontPage/5.0.2.2635 Server at tobala.net Port 80


UNIQUE_ID=THPyykrc23oAAGCM7jMAAADQ
HTTP_KEEP_ALIVE=115
HTTP_USER_AGENT=Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-TW; rv:1.9.2.8) Gecko/20100722 Firefox/3.6.8
SERVER_PORT=80
HTTP_HOST=tobala.net
DOCUMENT_ROOT=/home6/tbalanet/public_html
HTTP_ACCEPT_CHARSET=UTF-8,*
SCRIPT_FILENAME=/home6/tbalanet/public_html/cgi-bin/x/ajaxreq.sh
REQUEST_URI=/cgi-bin/x/ajaxreq.sh
SCRIPT_NAME=/cgi-bin/x/ajaxreq.sh
HTTP_CONNECTION=keep-alive
REMOTE_PORT=1551
PATH=/usr/local/bin:/usr/bin:/bin
PWD=/home6/tbalanet/public_html/cgi-bin/x
SERVER_ADMIN=webmaster@tbala.net
HTTP_ACCEPT_LANGUAGE=zh-tw,en-us;q=0.7,en;q=0.3
HTTP_ACCEPT=text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
REMOTE_ADDR=115.82.191.211
SHLVL=1
SERVER_NAME=tobala.net
SERVER_SOFTWARE=Apache/2.2.16 (CentOS) mod_ssl/2.2.16 0.9.8l DAV/2 mod_fcgid/2.3.5 mod_auth_passthrough/2.1 FrontPage/5.0.2.2635
QUERY_STRING=
SERVER_ADDR=69.89.27.215
GATEWAY_INTERFACE=CGI/1.1
SERVER_PROTOCOL=HTTP/1.1
HTTP_CACHE_CONTROL=max-age=0
HTTP_ACCEPT_ENCODING=gzip,deflate
REQUEST_METHOD=GET
_=/usr/bin/env
}}}

''[註]'' 傳回資訊中, 有詳細的 Apache 運作資訊

<<toBalaNotes "head">>

{{item1{取得 CGI 程式執行期間的環境變數}}}

''$ nano ~/www/cgi-bin/cgienv.sh''
{{{
#!/bin/bash
# CGI script: dump every environment variable visible to the CGI process
# as a preformatted HTML page.

# CGI header plus the required blank line.
echo "Content-type: text/html"
echo ""

echo '<html>'
echo '<head>'
echo '<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">'
echo '<title>Environment Variables</title>'
echo '</head>'
echo '<body>'
echo 'Environment Variables:'
echo '<pre>'
# env(1) prints all variables the web server passed to this script.
/usr/bin/env
echo '</pre>'

echo '</body>'
echo '</html>'

exit 0
}}}

<<toBalaNotes "env">>

{{item1{遠端執行 Linux 命令}}}

''$ nano ~/www/cgi-bin/uptime.sh''
{{{
#!/bin/bash
# CGI script: report the server's hostname and uptime as an HTML page.

# CGI header: content type followed by the mandatory blank line.
echo "Content-type: text/html"
echo ""

echo '<html>'
echo '<head>'
echo '<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">'

# Give the script a predictable PATH so hostname/uptime are found
# regardless of the web server's restricted CGI environment.
PATH="/bin:/usr/bin:/usr/sbin:/usr/opt/bin"
# Bug fix: was "export $PATH", which expands the value and makes export
# fail with "not a valid identifier", leaving PATH unexported.
export PATH

echo '<title>System Uptime</title>'
echo '</head>'
echo '<body>'

echo '<h3>'
hostname
echo '</h3>'

uptime

echo '</body>'
echo '</html>'

exit 0
}}}

<<toBalaNotes "cmd">>

{{item1{接受表單資料}}}
''$ nano ~/www/cgi-bin/ajaxform.sh''
{{{
#!/bin/bash
# CGI script: show a small GET form, and echo back the "id" and "text"
# query-string parameters when both are present.

# let the browser know that this is html code
echo "Content-type: text/html"
echo ""

# read in our parameters
# NOTE(review): only %20 is decoded here; any other URL escape reaches
# the script un-decoded.
ID=`echo "$QUERY_STRING" | sed -n 's/^.*id=\([^&]*\).*$/\1/p' | sed "s/%20/ /g"`
TEXT=`echo "$QUERY_STRING" | sed -n 's/^.*text=\([^&]*\).*$/\1/p' | sed "s/%20/ /g"`

# our html code
echo "<html>"
echo "<head><title>Hello CGI</title></head>"
echo "<body>"

# test if any parameters were passed
# NOTE(review): $ID / $TEXT are deliberately unquoted here; a value that
# contains spaces would break the test — acceptable for a tutorial example.
if [ $ID ] && [ $TEXT ]
then
	echo "The parameters were set <br>"
	echo "the value of ID is $ID <br>"
	echo "the value of TEXT is $TEXT <br>"
else
	echo "<form method=get>"
	echo "ID : <input type=text name=id><br>"
	echo "TEXT : <input type=text name=text><br>"
	echo "TEXT : <input type=submit>"
	echo "</form>"
fi

echo "</body>"
echo "</html>"
}}}

In order to process these, we need to do some ugly bashing of the environment string.
{{{
ID=`echo "$QUERY_STRING" | sed -n 's/^.*id=\([^&]*\).*$/\1/p' | sed "s/%20/ /g"`
TEXT=`echo "$QUERY_STRING" | sed -n 's/^.*text=\([^&]*\).*$/\1/p' | sed "s/%20/ /g"`
}}}
WHAT IS THIS ?!?!? It is actually easier than it looks :-)

The first line parses a numeric parameter and save it in the environment variable ID.
The second line parses an alpha-numeric parameter and save it in the environment variable TEXT.

Let me explain step by step:
{{{
* echo "$QUERY_STRING" | sed
  send the value of $QUERY_STRING to sed
* sed -e 's/^.*id=\([^&]*\).*$/\1/'
   cut out only the value of "id="
* sed -e 's/^.*text=\([^&]*\).*$/\1/'
   cut out only the value of "text="
* sed "s/%20/ /g"`
   replace the html escaped %20 for spaces 
}}}

<<toBalaNotes "form">>

We just wrote our first bash based CGI that actually does something half reasonable. Let's look at a more complicated example that does a certain action based on the actual value of the parameter. The following example gives the user a number of radio buttons. Depending on which one was set, a different command is executed. You will also learn how to use the case construct inside a bash script.

{{{
#!/bin/sh
# CGI script: offer a radio-button menu of commands (ifconfig / uname /
# dmesg / ls) and run the chosen one, printing its output inside <pre>.
# NOTE(review): the "\n" in the header relies on this shell's echo
# expanding backslash escapes (dash's builtin does) — confirm on the
# target system, otherwise the blank line after the header is missing.
echo "Content-type: text/html\n"

# read in our parameters
# FOLDER additionally decodes %2F back into "/" so paths survive the URL.
CMD=`echo "$QUERY_STRING" | sed -n 's/^.*cmd=\([^&]*\).*$/\1/p' | sed "s/%20/ /g"`
FOLDER=`echo "$QUERY_STRING" | sed -n 's/^.*folder=\([^&]*\).*$/\1/p' | sed "s/%20/ /g"| sed "s/%2F/\//g"`

# our html header
echo "<html>"
echo "<head><title>Hello CGI</title></head>"
echo "<body>"


# test if any parameters were passed
# NOTE(review): $CMD is unquoted, so a value with spaces breaks the test.
if [ $CMD ]
then
  # dispatch on the requested command; absolute paths are used because
  # the CGI environment's PATH may not include /sbin or /bin.
  case "$CMD" in
    ifconfig)
      echo "Output of ifconfig :<pre>"
      /sbin/ifconfig
      echo "</pre>"
      ;;

    uname)
      echo "Output of uname -a :<pre>"
      /bin/uname -a
      echo "</pre>"
      ;;

    dmesg)
      echo "Output of dmesg :<pre>"
      /bin/dmesg
      echo "</pre>"
      ;;

    ls)
      echo "Output of ls $FOLDER :<pre>"
      /bin/ls "$FOLDER"
      echo "</pre>"
      ;;

    *)
      echo "Unknown command $CMD<br>"
      ;;
  esac
fi

# print out the form
echo "Choose which command you want to run"
echo "<form method=get>"
echo "<input type=radio name=cmd value=ifconfig checked> ifconfig <br>"
echo "<input type=radio name=cmd value=uname> uname -a <br>"
echo "<input type=radio name=cmd value=dmesg> dmesg <br>"
echo "<input type=radio name=cmd value=ls> ls  -- folder <input type=text name=folder value=/mnt/flash><br>"
echo "<input type=submit>"
echo "</form>"
echo "</body>"
echo "</html>"
}}}

[img[img/cgi01.jpg]]

<<toBalaNotes "3">>



{{item1{設定 www-data 群組可執行 sudo 命令, 並取消輸入密碼}}}
{{{
$ sudo nano /etc/sudoers
                   :
%www-data ALL=(ALL) NOPASSWD:ALL               # 自行加入這一行
}}}

''產生使用者帳號''
{{{
$ nano createuser.sh
#!/bin/bash
# CGI script: create local user "aaa" via sudo and show useradd's exit code.
# Relies on the sudoers entry for %www-data (NOPASSWD) so no password
# prompt blocks the web server.

echo Content-type: text/html
echo ""
echo "<html>"
echo "<body>"
echo "<h1>Create aaa</h1>"
# stdout+stderr are sent to /tmp/error; only the exit status is shown.
sudo /usr/sbin/useradd -m -s /bin/bash aaa &>/tmp/error
echo $?
echo "</body>"
echo "</html>"
}}}

''遠端關機''
{{{
$ nano /usr/lib/cgi-bin/down.sh
#!/bin/bash
# CGI script: shut the machine down remotely (intentionally broken demo).
# NOTE: "shutdown" is called without a path; the CGI environment's PATH
# does not include /sbin, so this fails with "command not found"
# (see the error.log excerpt below). The fixed version follows.

echo Content-type: text/html
echo ""
echo "<html>"
echo "<body>"
echo "<h1>Shutdown</h1>"
sudo shutdown -h now

echo "</body>"
echo "</html>"
}}}

上面程式執行, 在 /var/log/apache2/error.log 這錯誤訊息檔, 會出現以下錯誤訊息 :
{{{
# tail /var/log/apache2/error.log 
/usr/lib/cgi-bin/down.sh: line 8: shutdown: command not found
/usr/lib/cgi-bin/down.sh: line 8: shutdown: command not found
}}}

''修改 down.sh 程式''
{{{
$ nano /usr/lib/cgi-bin/down.sh
#!/bin/bash
# CGI script: shut the machine down remotely (fixed version).
# Uses the absolute path /sbin/shutdown so the command is found even
# though the CGI environment's PATH omits /sbin.

echo Content-type: text/html
echo ""
echo "<html>"
echo "<body>"
echo "<h1>Shutdown</h1>"
sudo /sbin/shutdown -h now

echo "</body>"
echo "</html>"
}}}

''@@color:red;另一個執行方法 : @@'' 只要修改 /sbin/shutdown 檔案屬性 (chmod 4755 /sbin/shutdown), 不需修改 /etc/sudoers 
<<toBalaNotes "4">>




///%head
//%/

///%env
//%/

///%cmd
//%/

///%form
//%/

///%3
//%/

///%4
//%/

{{item1{HDP131 系統準備 - DataNode}}}

''1. 登入 HDP120''
{{{
$ sudo virsh console HDP120
Connected to domain HDP120
Escape character is ^]

}}}

''2. 修改 /etc/hosts 檔案''
請加入 "192.168.100.30  HDP131" 這行資料
{{{
$ nano /etc/hosts
127.0.0.1       localhost
192.168.100.20  HDP120
192.168.100.21  HDP121
192.168.100.22  HDP122
192.168.100.30  HDP130
192.168.100.31  HDP131
}}}

''3. 將 /etc/hosts 複製給 HDP131''
{{{
# scp /etc/hosts root@192.168.100.31:/etc/hosts
The authenticity of host '192.168.100.31 (192.168.100.31)' can't be established.
RSA key fingerprint is 8d:b7:99:60:7f:39:05:b5:09:5f:ed:a4:af:27:cb:46.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added '192.168.100.31' (RSA) to the list of known hosts.
root@192.168.100.31's password: 
hosts                                         100%  306     0.3KB/s   00:00  

}}}

''4. 將 HDP120 的 SSH Server 憑證, 複製給 HDP131''
{{{
# scp ~/.ssh/id_dsa.pub root@HDP131:/root/.ssh/authorized_keys
The authenticity of host 'hdp131 (192.168.100.31)' can't be established.
RSA key fingerprint is 8d:b7:99:60:7f:39:05:b5:09:5f:ed:a4:af:27:cb:46.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'hdp131' (RSA) to the list of known hosts.
root@hdp131's password: 
id_dsa.pub                                    100%  601     0.6KB/s   00:00  
}}}

''5. 指定 HDP131 為 DataNode''
將 HDP131 加入 slaves, 並將 HDP120 移除
{{{
# cd /mnt/hda1/hadoop-1.0.3

# nano conf/slaves
HDP121
HDP122
HDP131
}}}

''6. 登入 HDP131, 儲存新複製的檔案''
{{{
# ssh HDP131

# filetool.sh -b
Backing up files to /mnt/hda1/tce/mydata.tgz 

}}}

''7. 建立 HDP131 資料目錄''
{{{
$ cd /mnt/hda1/hadoop-1.0.3

$ mkdir data
}}}

''8. 脫離 HDP131''
{{{
# exit
}}}

''9. 脫離 HDP120''
{{{
請按 Ctrl + ]
}}}
<<toBalaNotes "1">>
{{item1{設定 DataNode}}}
''1. 登入 HDP131''
{{{
$ sudo virsh console HDP131
Connected to domain HDP131
Escape character is ^]

Micro Core Linux 3.8.2
HDP131 login: root
Password: 
}}}
''[註]'' 登入帳號 root, 密碼為 student


''2. 修改 core-site.xml 設定檔''
{{{
# cd /mnt/hda1/hadoop-1.0.3/

# nano conf/core-site.xml 
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<!-- Put site-specific property overrides in this file. -->

<configuration>
     <property>
         <name>fs.default.name</name>
         <value>hdfs://HDP120:9000</value>
     </property>
     <property>
         <name>hadoop.tmp.dir</name>
         <value>/mnt/hda1/hadoop-1.0.3/data</value>
     </property>
</configuration>
}}}

''3. 修改 hdfs-site.xml  設定檔''
{{{
# nano conf/hdfs-site.xml 
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<!-- Put site-specific property overrides in this file. -->

<configuration>
     <property>
         <name>dfs.replication</name>
         <value>2</value>
     </property>
</configuration>
}}}

''4. 啟動 datanode 服務''
{{{
# hadoop-daemon.sh start datanode
}}}
''[註]'' 啟動 tasktracker 服務命令如下 :
{{{
# hadoop-daemon.sh start tasktracker
}}}

''5. 檢測 HDP131 是否成為 DataNode''
{{{
# hadoop dfsadmin -report
Configured Capacity: 16662200320 (15.52 GB)
Present Capacity: 13815033856 (12.87 GB)
DFS Remaining: 13814894592 (12.87 GB)
DFS Used: 139264 (136 KB)
DFS Used%: 0%
Under replicated blocks: 0
Blocks with corrupt replicas: 0
Missing blocks: 0
-------------------------------------------------
Datanodes available: 4 (4 total, 0 dead)

Name: 192.168.100.22:50010
Decommission Status : Normal
Configured Capacity: 4226125824 (3.94 GB)
DFS Used: 28672 (28 KB)
Non DFS Used: 397152256 (378.75 MB)
DFS Remaining: 3828944896(3.57 GB)
DFS Used%: 0%
DFS Remaining%: 90.6%
Last contact: Fri Apr 19 14:44:31 CST 2013

Name: 192.168.100.31:50010
Decommission Status : Normal
Configured Capacity: 4226125824 (3.94 GB)
DFS Used: 28672 (28 KB)
Non DFS Used: 833675264 (795.05 MB)
DFS Remaining: 3392421888(3.16 GB)
DFS Used%: 0%
DFS Remaining%: 80.27%
Last contact: Fri Apr 19 14:44:30 CST 2013

Name: 192.168.100.20:50010
Decommission Status : Normal
Configured Capacity: 3983822848 (3.71 GB)
DFS Used: 40960 (40 KB)
Non DFS Used: 1219186688 (1.14 GB)
DFS Remaining: 2764595200(2.57 GB)
DFS Used%: 0%
DFS Remaining%: 69.4%
Last contact: Fri Apr 19 14:44:29 CST 2013

Name: 192.168.100.21:50010
Decommission Status : Normal
Configured Capacity: 4226125824 (3.94 GB)
DFS Used: 40960 (40 KB)
Non DFS Used: 397152256 (378.75 MB)
DFS Remaining: 3828932608(3.57 GB)
DFS Used%: 0%
DFS Remaining%: 90.6%
Last contact: Fri Apr 19 14:44:30 CST 2013
}}}
<<toBalaNotes "2">>
{{item1{移除 DataNode - HDP120}}}
''1. 登入 HDP120''
{{{
$ sudo virsh console HDP120
Connected to domain HDP120
Escape character is ^]

}}}

''2. 建立 DataNode 移除名單檔''
{{{
# cd /mnt/hda1/hadoop-1.0.3
# nano conf/exclude
HDP120

}}}

''3. 設定 DataNode 移除名單檔''
{{{
# nano conf/hdfs-site.xml 
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<!-- Put site-specific property overrides in this file. -->

<configuration>
     <property>
         <name>dfs.http.address</name>
         <value>HDP120:50070</value>
     </property> 

     <property>  
         <name>dfs.secondary.http.address</name>  
         <value>HDP130:50090</value>  
     </property>  

     <property>
         <name>dfs.replication</name>
         <value>2</value>
     </property>

     <property>
         <name>dfs.hosts.exclude</name>
         <value>/mnt/hda1/hadoop-1.0.3/conf/exclude</value>
     </property>

</configuration>
}}}

''4. 套用 DataNode 移除名單檔''
{{{
# hadoop dfsadmin -report
Configured Capacity: 16662200320 (15.52 GB)
Present Capacity: 13814939633 (12.87 GB)
DFS Remaining: 13814788096 (12.87 GB)
DFS Used: 151537 (147.99 KB)
DFS Used%: 0%
Under replicated blocks: 0
Blocks with corrupt replicas: 0
Missing blocks: 0
-------------------------------------------------
Datanodes available: 3 (4 total, 1 dead)

Name: 192.168.100.22:50010
Decommission Status : Normal
Configured Capacity: 4226125824 (3.94 GB)
DFS Used: 28672 (28 KB)
Non DFS Used: 397160448 (378.76 MB)
DFS Remaining: 3828936704(3.57 GB)
DFS Used%: 0%
DFS Remaining%: 90.6%
Last contact: Fri Apr 19 20:24:45 CST 2013

Name: 192.168.100.31:50010
Decommission Status : Normal
Configured Capacity: 4226125824 (3.94 GB)
DFS Used: 40960 (40 KB)
Non DFS Used: 833683456 (795.06 MB)
DFS Remaining: 3392401408(3.16 GB)
DFS Used%: 0%
DFS Remaining%: 80.27%
Last contact: Fri Apr 19 20:24:45 CST 2013

Name: 192.168.100.21:50010
Decommission Status : Normal
Configured Capacity: 4226125824 (3.94 GB)
DFS Used: 40960 (40 KB)
Non DFS Used: 397160448 (378.76 MB)
DFS Remaining: 3828924416(3.57 GB)
DFS Used%: 0%
DFS Remaining%: 90.6%
Last contact: Fri Apr 19 20:24:42 CST 2013

Name: 192.168.100.20:50010
Decommission Status : Decommissioned
Configured Capacity: 3983822848 (3.71 GB)
DFS Used: 40945 (39.99 KB)
Non DFS Used: 1219256335 (1.14 GB)
DFS Remaining: 2764525568(2.57 GB)
DFS Used%: 0%
DFS Remaining%: 69.39%
Last contact: Fri Apr 19 20:12:49 CST 2013
}}}
<<toBalaNotes "3">>

///%1
//%/

///%2
//%/

///%3
//%/
{{item1{新增使用者帳號}}}

''1. 建立存放使用者帳號資料庫的目錄''
{{{
$ mkdir /usr/lib/cgi-bin/data
$ chown root:www-data /usr/lib/cgi-bin/data
$ chmod 775 /usr/lib/cgi-bin/data
}}}

''2. 撰寫程式''
{{{
$ nano adduser.sh
#!/bin/bash
# CGI script: append a new user record to data/users.db.
# Response body is a bare status code:
#   0 = record created, 1 = no id supplied, 2 = id already exists.

# let the browser know that this is html code
echo "Content-type: text/html"
echo ""

# read in our parameters (only %20 is decoded)
ID=`echo "$QUERY_STRING" | sed -n 's/^.*id=\([^&]*\).*$/\1/p' | sed "s/%20/ /g"`
PASS=`echo "$QUERY_STRING" | sed -n 's/^.*pass=\([^&]*\).*$/\1/p' | sed "s/%20/ /g"`
TEXT=`echo "$QUERY_STRING" | sed -n 's/^.*text=\([^&]*\).*$/\1/p' | sed "s/%20/ /g"`

# create the database file on first use
[ ! -f data/users.db ] && touch data/users.db

if [ ! -z "$ID" ]; then
   # duplicate check: each record starts with <div id='...'>
   grep "<div id='$ID'>" data/users.db &>/dev/null
   if [ "$?" == "0" ]; then
      echo "2"
   else
      echo -e "<div id='$ID'>\n  <div>$ID</div>\n  <div>$PASS</div>\n  <p>$TEXT</p>\n</div>" >> data/users.db
      echo  "0"
   fi
else
   echo "1"
fi
}}}

@@color:red;''[註] Apache 內定 CGI 目錄為 /usr/lib/cgi-bin''@@

''3. 測試''
{{{
http://140.137.214.250/cgi-bin/adduser.sh?id=123&pass=123&text=123
}}}

<<toBalaNotes "1">>

{{item1{帳號登入}}}

''1. 撰寫程式''
{{{
$ nano login.sh

#!/bin/bash
# CGI script: check whether the user id given in the query string exists
# in data/users.db and report the result via a JavaScript alert.

# let the browser know that this is html code
echo "Content-type: text/html"
echo ""

# read in our parameters (only %20 is decoded)
ID=`echo "$QUERY_STRING" | sed -n 's/^.*id=\([^&]*\).*$/\1/p' | sed "s/%20/ /g"`
#PASS=`echo "$QUERY_STRING" | sed -n 's/^.*pass=\([^&]*\).*$/\1/p' | sed "s/%20/ /g"`
#TEXT=`echo "$QUERY_STRING" | sed -n 's/^.*text=\([^&]*\).*$/\1/p' | sed "s/%20/ /g"`

if [ ! -z "$ID" ]; then
   if [ ! -f data/users.db ]; then
      echo "<script>alert('檔案不存在')</script>"
      # Bug fix: "return" is only valid inside a function; at script level
      # bash prints an error and keeps executing. Use exit to stop here.
      exit 0
   fi
   # a matching record starts with <div id='...'>
   grep "<div id='$ID'>" data/users.db &>/dev/null
   if [ "$?" == "0" ]; then
      echo "<script>alert('登入成功')</script>"
   else
      echo "<script>alert('帳號不存在')</script>"
   fi
else
   echo "<script>alert('帳號不存在')</script>"
fi
}}}

''2. 測試''
{{{
http://140.137.214.250/cgi-bin/login.sh?id=123
}}}

<<toBalaNotes "2">>

{{item1{以 HTTP POST 命令上傳資料}}}
程式範例網址 : http://paulturner.me/2009/07/reading-http-post-data-using-bash/

''1. 撰寫程式''
{{{
$ nano postdata.sh

#!/bin/bash
# CGI script: reads the raw body of an HTTP POST and echoes it back to the
# browser via a JavaScript alert(), URI-decoded on the client side.
echo "Content-type: text/html; charset=UTF-8"
echo "Status: 200 OK"
echo ""

# For POST requests the web server passes the body length in CONTENT_LENGTH
# and the body itself on stdin; read exactly that many characters.
if [ "$REQUEST_METHOD" = "POST" ]; then
    if [ "$CONTENT_LENGTH" -gt 0 ]; then
        read -n $CONTENT_LENGTH POST_DATA <&0
    fi
fi

# POST_DATA is still URL-encoded (e.g. id=123&text=abc); decodeURI runs in
# the browser.  NOTE(review): a single quote in the data would break the
# alert() string literal -- acceptable for a demo.
echo "<script>alert(decodeURI('${POST_DATA}'))</script>"
}}}

''2. 編輯 formpost.html 網頁 (此網頁一定要存到 /var/www 目錄中)''
{{{
$ nano formpost.html

<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<title>Form</title>
</head>
<body>
  <form action="/cgi-bin/postdata.sh" method="post">
    ID : <input type=text name=id><br/>
    Text : <input type=text name=text><br/>
    <input type="submit" value="submit">
  </form>
</body>
</html>
}}}

''3. 執行''
{{{
http://140.137.214.250/formpost.html
}}}

<<toBalaNotes "3">>
///%1
//%/

///%2
//%/

///%3
//%/
{{item1{建立測試文字檔}}}
{{{
$ nano mytext 
cat in the hat
dog in the sky

# 將 mytext 文字檔, 上載至 HDFS 檔案系統
$ hadoop dfs -put mytext /
}}}

{{item1{使用 cat 當 mapper,使用 wc 當 reducer}}}
{{{
# 因 cat 命令有 "讀入檔案" 及 "輸出檔案內容" 這二個動作, 所以適合做 mapper , 執行結果會存在 /mytextout 目錄中
$ hadoop jar /opt/hadoop/contrib/streaming/hadoop-streaming-1.0.4.jar -input /mytext -output /mytextout -mapper /bin/cat -reducer /usr/bin/wc

# 檢視執行結果
$ hadoop dfs -cat /mytextout/part-00000
      2       8      32	
}}}
<<toBalaNotes "1">>
{{item1{撰寫 mapper.sh}}}
以下範例說明, 請參考下列文章 :
1. Using Hadoop Streaming and bash scripts to generate an xml file 
http://blog.pfa-labs.com/2010/11/using-hadoop-streaming-and-bash-scripts.html

''1. 撰寫 mapper.sh''
這程式只會將每行中 "dog" 或 "cat" 輸出給 reducer.sh
{{{
#!/bin/bash
# Hadoop Streaming mapper: reads input lines from stdin and emits only the
# words "dog" and "cat", one per line, on stdout for the reducer.
#
# BUG FIX: the original script began with `echo "" > $1`.  Streaming mappers
# are invoked with no positional arguments, so $1 is empty and the redirection
# fails with an "ambiguous redirect" error; the line served no purpose in a
# stdin/stdout streaming job and has been removed.

while read x
do
  for y in $x
  do
    if [ "$y" == "dog" -o "$y" == "cat" ]; then
       echo $y
    fi
  done
done
}}}

''2. 執行 mapper.sh''
以下命令, 利用 mapred.reduce.tasks=0 取消 reducer 功能
{{{
$ hadoop jar /opt/hadoop/contrib/streaming/hadoop-streaming-1.0.4.jar -D mapred.reduce.tasks=0 -input /mytext -output /mytextout -mapper mapper.sh 
}}}

''3. 檢視執行結果''
{{{
$ hadoop dfs -cat /mytextout/part-00000
cat	
dog	
}}}
<<toBalaNotes "2">>
{{item1{使用 Bash Shell Script 當 Mapper 與 Reducer}}}
''1. 撰寫 streamingMapper.sh 及 streamingReducer.sh''
{{{
$ echo "sed -e \"s/ /\n/g\" | grep ." > streamingMapper.sh
$ echo "uniq -c | awk '{print \$2 \"\t\" \$1}'" > streamingReducer.sh
$ chmod +x *.sh
}}}

''2. 刪除 /mytextout 目錄''
{{{
$ hadoop dfs -rmr /mytextout
}}}

''3. 執行 streamingMapper.sh 及 streamingReducer.sh''
{{{
$ hadoop jar /opt/hadoop/contrib/streaming/hadoop-streaming-1.0.4.jar -input /mytext -output /mytextout -mapper streamingMapper.sh -reducer streamingReducer.sh 
}}}

''4. 檢視執行結果''
{{{
$ hadoop dfs -cat /mytextout/part-00000
cat	1
dog	1
hat	1
in	2
sky	1
the	2
}}}
<<toBalaNotes "3">>

///%1
//%/

///%2
//%/

///%3
//%/

{{item1{明碼認證}}}

''1. 產生目錄設定檔 (.htaccess)''
{{{
$ cd ~/www
$ nano .htaccess
AuthName     "Protect by .htaccess"
AuthType     Basic
AuthUserFile /home/student/apache.passwd
Require valid-user
}}}

''2. 產生密碼''
{{{
$ htpasswd -c ~/apache.passwd student
New password:
Re-type new password:
Adding password for user student
}}}

''[註]'' 設定第二個帳號, 不需 -c 這參數
Note that using the -c flag with htpasswd on an existing file will clear the file's contents completely and create a new one.

''3. 測試''
啟動瀏覽器, 輸入以下網址 (一定要有 ~) :
{{{
http://192.168.99.6/~student
}}}
這時會被要求輸入帳號與密碼

[img[img/apache2/a2basic.png]]
<<toBalaNotes "1">>


///%1
//%/
''參考文章''
[[1. MapReduce  and Bigtable (寫的不錯)|http://www.google.com.tw/url?sa=t&source=web&ct=res&cd=4&ved=0CB8QFjAD&url=http%3A%2F%2Foasis.csie.ntu.edu.tw%2F2007_fall_network%2Fslides%2F42&ei=vHpzS9XmKMuHkQXIqY33CQ&usg=AFQjCNG1KuGKqu8_gh9be0ciq4lG0ms2pg&sig2=TVuCkZkzgX5_CDHEUiFKHg]]

MapReduce 是 Google 提出的一個軟體架構,用於大規模數據集(大於1TB)的并行運算。概念"Map(映射)"和"Reduce(化簡)",和他們的主要思想,都是從函數式程式語言借來的,還有從向量程式語言借來的特性。[1]

當前的軟體實現是指定一個 Map(映射)函數,用來把一組鍵值對映射成一組新的鍵值對,指定併發的 Reduce(化簡)函數,用來保證所有映射的鍵值對中的每一個共享相同的鍵組。

{{item1{映射和化簡}}}
簡單說來,一個映射函數就是對一些獨立元素組成的概念上的列表(例如,一個測試成績的列表)的每一個元素進行指定的操作(比如前面的例子裡,有人發現所有學生的成績都被高估了一分,他可以定義一個「減一」的映射函數,用來修正這個錯誤。)。事實上,每個元素都是被獨立操作的,而原始列表沒有被更改,因為這裡創建了一個新的列表來保存新的答案。這就是說,Map操作是可以高度并行的,這對高性能要求的應用以及并行計算領域的需求非常有用。

而化簡操作指的是對一個列表的元素進行適當的合併(繼續看前面的例子,如果有人想知道班級的平均分該怎麼做?他可以定義一個化簡函數,通過讓列表中的奇數(odd)或偶數(even)元素跟自己的相鄰的元素相加的方式把列表減半,如此遞歸運算直到列表只剩下一個元素,然後用這個元素除以人數,就得到了平均分)。雖然他不如映射函數那麼并行,但是因為化簡總是有一個簡單的答案,大規模的運算相對獨立,所以化簡函數在高度并行環境下也很有用。

{{item1{分佈和可靠性}}}
MapReduce 通過把對數據集的大規模操作分發給網路上的每個節點實現可靠性;每個節點會周期性的把完成的工作和狀態的更新報告回來。如果一個節點保持沉默超過一個預設的時間間隔,主節點(類同Google檔案系統中的主伺服器)記錄下這個節點狀態為死亡,並把分配給這個節點的數據發到別的節點。每個操作使用命名文件的原子操作以確保不會發生并行執行緒間的衝突;當文件被改名的時候,系統可能會把他們複製到任務名以外的另一個名字上去。(避免副作用)。

化簡操作工作方式很類似,但是由於化簡操作在并行能力較差,主節點會盡量把化簡操作調度在一個節點上,或者離需要操作的數據儘可能近的節點上了;這個特性可以滿足 Google 的需求,因為他們有足夠的頻寬,他們的內部網路沒有那麼多的機器。

{{item1{用途}}}
在 Google,MapReduce 用在非常廣泛的應用程序中,包括「分佈 grep,分佈排序,web 連接圖反轉,每台機器的詞向量,web訪問日誌分析,反向索引構建,文檔聚類,機器學習,基於統計的機器翻譯……」值得注意的是,MapReduce 實現以後,它被用來重新生成 Google 的整個索引,並取代老的 ad hoc 程序去更新索引。

MapReduce會生成大量的臨時文件,為了提高效率,它利用 Google 檔案系統來管理和訪問這些文件。

{{item1{資料庫系統是榔頭; MapReduce 則是螺絲起子}}}
本文網址 : http://www.hadoop.tw/2008/10/databases-are-hammers-mapreduce-is-a-screwdriver.html

最近年來不管是雲端運算(Cloud computing) 或是網格運算(Grid computing) 都相當的熱門, 而它們的核心技術 MapReduce 則受到相當程度的重視. 04 年時 MapReduce 一開始被 Google 所提出來, 它本身是一種程式開發的模式,可以用來處理大量的資料. 但因為長時間以來, 不管是在學校或是業界, 資料庫系統都廣泛的被教授和使用,很多人一看到"處理大量的資料", 自然的就會把 MapReduce 跟資料庫系統放在一起, 拿來做比較等等, 因而產生一些錯誤的認知.

目前任職在 Google 的 Mark 日前撰寫了一篇文章 [[Databases are hammers; MapReduce is a screwdriver|http://scienceblogs.com/goodmath/2008/01/databases_are_hammers_mapreduc.php]] 來說明這兩者的不同. 這篇文章是我個人認為目前在網路上說明 MapReduce 文章當中寫的最貼切且淺顯易懂的. 經過原作者的同意 , 我把它翻譯成中文, 希望讓更多人藉著閱讀這篇文章來了解 MapReduce.

資料庫系統是榔頭; MapReduce 則是螺絲起子.

有一群朋友們把一篇關於批評MapReduce的文章傳給我. 我曾經猶豫是否我應該寫些什麼, 因為目前為止有很多關於MapReduce的東西正被開發中, 而且被Google的同仁們廣泛的使用. 但是這篇文章真的是有點惱人, 我理當回應它. 所以我在這邊先做個澄清, 我並不是以一位Google員工的身份來評論它.(事實上, 我在工作上並沒有使用MapReduce). 這純粹是用我自己的時間, 發表我個人的意見. 如果您覺得這是一篇愚蠢的文章, 那是我個人的錯跟Google無關. 如果您覺得這篇文章寫的很出色, 那也是我個人的榮幸跟Google無關. Google 並沒有要求我寫這篇文章, 同樣的我也沒有要求 Google 的同意. 這僅只是我個人的行為, 了解嗎?

我會對這件事情有興趣的原因是因為這跟我的博士研究有關. 回想起研究所那時候, 我的研究是關於在非科學性的應用軟體上, 將平行運算的技術應用在結構化資料上.這差不多是 MapReduce 所要做的事情. 而我所提出的解決方案是某種階層式的分散(scatter)/收集(gather)運算子,非常接近 MapReduce 運作的方式. 那最大的差異在哪邊? 那就是 MapReduce 打敗了我, 設計MapReduce 的人注意到我沒注意到的東西, 並且利用了它使得 MapReduce 程式更乾淨, 更容易開發.

讓我們從頭開始, MapReduce 是啥, 它能做什麼?

今天假設你在工作, 你需要做某件事情, 但是它在你的電腦上會跑蠻久的時間, 你不想等但是你也不想要花幾百萬去買台超級電腦, 那你要怎樣才能夠讓它跑快一點? 有一個方法是買一群便宜的電腦, 讓它們同時一起幫你執行工作. 另一方面你也注意到辦公室內有很多台電腦, 幾乎每個員工座位上都有一台, 在某個時間點上大部分的電腦並沒有在做太多事情, 那為何不利用它們呢? 當你的機器不甚忙碌的時候, 你允許你的同事借用你當下並不在使用的資源, 當你需要的時候也可以借用他們的機器. 所以當你需要執行大的工作的時候, 你可以輕鬆的找到好幾打的機器來用.

這個方法最大的問題在於, 大部分的程式並沒撰寫成可以在多台的機器上執行的模式, 它們被寫成只能在一臺機器上執行. 而要把一件困難的工作分到很多台機器執行上是件困難的事情.

MapReduce 是一種程式語言開發模式, 它讓你可以透過撰寫特有的制式化程式, 輕鬆的把工作分散到一群機器上面去執行. 它基本的概念在於你把工作分成兩部份 Map 以及 Reduce. Map 基本上負責把問題分成好幾份並且送到不同的機器去,所以它們可以同時被處理. Reduce 則接收每份問題的處理的結果, 最後組合成單一的答案.

MapReduce 運作的關鍵在於, 把輸入的資料在概念上當成成一個包含很多筆紀錄的 list, 透過 map 這些紀錄被分配到不同的機器上去處理. map 計算的結果是一個包含許多 key/value 組的 list. Reduce 會取得同樣 key 值的所有 value, 並組合成為最後的單一 value. 也就是 Map 將資料產生成為 key/value 組, reduce 則組合結果給你單一的結果. 你無法知道這個工作是被分成100份或是2份, 而最後應該是跟只有單一 map 時候是一樣的. (這就是 MapReduce 和我博士研究方法不同的地方, 領悟到把結果當作是 key/value map 來看,你會得到非常乾淨且一致的歸納流程. 雖然我的 scatter/gather 模式是同樣的概念 , 但是我的 reduces 跟 Map/Reduce 比起來相對的醜陋許多.)

MapRedue 美麗的地方在於撰寫它是多麼的簡單, 在平行處理程式語言的領域上從來沒這麼簡單過.

回到該篇批評 MapReduce 的文章, 他們批評 MapReduce, 其實基本上並不是基於關聯式資料庫(Relational databases) 的概念.

當我第一次花時間學習關聯式資料庫的時候, 我的老闆告訴我ㄧ個關於資料庫專家的故事, 是我認為最貼切的故事. 這個故事是說關聯式資料庫的專家們發現了這個世界上最美麗,最漂亮,最完美的榔頭, 完美到不會太重,也不會太輕,恰到好處的可以把釘子給釘進去,它的柄是根據所有者手的角度去特製化的, 所以釘一整天也不會起水泡.同時它也是裝飾的很漂亮的榔頭, 上面有寶石鑲嵌跟黃金的工飾在適當的地方, 絲毫不會減損榔頭的功能. 它真的是一個最偉大的榔頭.資料庫專家們熱愛他們的榔頭, 因為它是多麼美妙的工具. 而且他們真的利用這個工具做出許偉大的東西. 他們是如此的喜歡關聯式資料庫,以致於認為這是這是他們唯一需要的工具. 如果你給他們一個螺絲釘, 他們會把它當成釘子一樣的釘進去. 當你提醒他們說, 嘿這樣會弄壞東西 , 那是螺絲釘不是釘子, 他們會說"我知道阿, 但是我有這隻極好的榔頭 , 你不能期待我使用那鱉腳的小螺絲起子"

現況是這樣, 他們有關聯式資料庫, 關聯式資料庫絕對是個傑出的東西, 是個驚人的工具可以幫我們做出驚人的軟體. 我自己曾經利用關聯式資料庫完成許多工作, 沒有它, 我將沒有辦法完成一些令我感到驕傲的事情. 我完全不想貶低關聯式資料庫, 它真的是偉大的系統.但是不是每件事情都是關聯式資料庫, 不是每件事情天生就適合把它當作關聯來看待. 所有對 Mapreduce 的指責都源於"這不是我們關聯式資料庫會做的方式", 而並沒有了解到事情的重點. 關聯式資料庫的平行處理並不是很好, 你知道有多少關聯式資料庫可以有效的把工作分給 1,000 台普通機器? 關聯式資料庫無法很好的處理非表格式的資料, 像是面對遞迴資料的處理上可以說是是聲名狼藉. MapReduce 不是為了要取代關聯式資料庫, 它是提供一種輕便的程式語言開發方式, 讓大家可以快速且以平行的方式在一群機器上面執行, 僅止於這樣.



{{item1{下載 HBase 套件}}}
以下命令在 HDP120 系統執行
{{{
# # cd /mnt/hda1/

# curl -s http://ftp.twaren.net/Unix/Web/apache/hbase/stable/ | grep hbase-
<img src="/icons/compressed.gif" alt="[   ]"> <a href="hbase-0.94.7-security.tar.gz">hbase-0.94.7-security.tar.gz</a> 26-Apr-2013 06:08   55M  
<img src="/icons/compressed.gif" alt="[   ]"> <a href="hbase-0.94.7.tar.gz">hbase-0.94.7.tar.gz</a>          26-Apr-2013 06:08   55M 

# wget http://ftp.twaren.net/Unix/Web/apache/hbase/stable/hbase-0.94.7.tar.gz
}}}

''The 0.95.x releases are NOT stable.'' These are "Development" Series releases, a set of "rough cuts" taken from the branch that will eventually become our next stable release. We encourage you to download the 0.95.x bundle and take it for a spin, and pass us feedback. These 0.95.x series releases are NOT for production deploy. To learn more about our versioning, see HBase Versioning in our reference guide.

{{item1{解壓縮 HBase 套件}}}
{{{
$ tar xvfz hbase-0.94.7.tar.gz 
}}}

{{item1{名稱解析}}}
{{{
# nano /etc/hosts
127.0.0.1	localhost
192.168.100.20  HDP120
192.168.100.21  HDP121
192.168.100.22  HDP122
192.168.100.30  HDP130
192.168.100.31  HDP131
                  :
}}}
<<toBalaNotes "1">>
{{item1{產生 HBase 系統資料夾}}}
{{{
# cd hbase-0.94.7/

# mkdir hdbstore
}}}

{{item1{設定 HBase 系統}}}
{{{
# nano conf/hbase-site.xml 
                                :
                                :
<configuration>
  <property>
    <name>hbase.rootdir</name>
    <value>hdfs://HDP120:9000/hbase</value>
  </property>

  <property>
    <name>hbase.tmp.dir</name>
    <value>/mnt/hda1/hbase-0.94.7/hdbstore</value>
  </property>
</configuration>
}}}

{{item1{啟動 HBase 系統}}}
{{{
# ./bin/start-hbase.sh 
starting master, logging to /mnt/hda1/hbase-0.94.7/bin/../logs/hbase-root-master-HDP120.out
}}}

{{item1{進入命令模式}}}
{{{
# ./bin/hbase shell
HBase Shell; enter 'help<RETURN>' for list of supported commands.
Type "exit<RETURN>" to leave the HBase Shell
Version 0.94.7, r1471806, Wed Apr 24 18:48:26 PDT 2013

hbase(main):001:0> status
1 servers, 0 dead, 2.0000 average load

hbase(main):002:0> quit
}}}

{{item1{停止 HBase 系統}}}
{{{
# ./bin/stop-hbase.sh 
stopping hbase..............

}}}
<<toBalaNotes "2">>



///%1
//%/

///%2
//%/

下載 VMFactory.zip, 然後將之解壓縮至登入帳號 (student) 的家目錄, 目錄結構如下 :
{{{
.
|-- cleanRootTrash.sh
|-- cpvm.sh
|-- delvm.sh
|-- ISO
|   |-- multicore_3.8.3.iso
|   |-- ubuntu-10.04.3-server-i386.iso
|   `-- ubuntu-11.04-desktop-i386.iso
|-- kvm_virsh
|   |-- floppyfw-3.0.14.img
|   |-- FloppyFW.xml
|   `-- installvm.sh
|-- LIB
|   |-- inpmenu.sh
|   `-- makevm.sh
|-- newvm.sh
|-- QCOW2
|   `-- new35g.qcow2
|-- resetMAC.sh
|-- startvm.sh
|-- VirtualManager
|   `-- insvmm.sh
|-- VM
|-- vmfactory.sh
`-- vmlist

6 directories, 18 files
}}}

''@@color:red;注意事項@@''
1. 記得先執行 resetMAC.sh, 確認本機電腦網卡設定
2. 可用 cleanRootTrash.sh 清除 root 的 trash
3. 執行 vmfactory.sh 會啟動選單式虛擬工廠
4. [[安裝與設定 gcin]]

{{op1{新增虛擬主機}}}

1. 執行 vmenu.sh 程式, 命令如下 : 
{{{
$ cd VMFactory/

$ sudo  ./vmfactory.sh 
}}}

2. 選擇 ''新增原型虛擬主機 (newvm.sh)'', 如下圖 :

[img[img/vmfactory/vmfnew01.png]]

3. 選擇 安裝光碟 

[img[img/vmfactory/vmfnew02.png]]

4. 開始安裝

[img[img/vmfactory/vmfnew03.png]]

{{op1{啟動虛擬主機}}}

1. 選擇 ''啟動原型虛擬主機 (startvm.sh)'', 如下圖 :

[img[img/vmfactory/vmfstart01.png]]

2. 選擇已安裝虛擬主機, 如下圖 :

[img[img/vmfactory/vmfstart02.png]]

{{op1{刪除虛擬主機}}}

1. 選擇 ''刪除原型虛擬主機 (delvm.sh)'', 如下圖 :

[img[img/vmfactory/vmfdel01.png]]

2. 選擇已安裝虛擬主機, 如下圖 :

[img[img/vmfactory/vmfdel02.png]]

''[註]'' 此功能會將 虛擬主機硬碟映像檔 (*.qcow2) 刪除
<<toBalaNotes "1">>
{{op1{送出虛擬主機}}}

1. 選擇 ''送出原型虛擬主機 (cpvm.sh)'', 如下圖 :

[img[img/vmfactory/vmfcp01.png]]

2. 選擇已安裝虛擬主機, 如下圖 :

[img[img/vmfactory/vmfcp02.png]]

''[註]'' 所謂 ''送出虛擬主機'' 這功能, 只是將 虛擬主機硬碟映像檔 (*.qcow2), 複製到 /var/lib/libvirt/images 目錄中
<<toBalaNotes "2">>

///%1
//%/

///%2
//%/
''參考文章''
1. Apache2 SSL in Ubuntu
http://www.linode.com/wiki/index.php/Apache2_SSL_in_Ubuntu

本文網址 : http://beginlinux.com/blog/2009/01/ssl-on-ubuntu-810-apache2/
Setting up SSL with Ubuntu 8.10 is a simple process but it does have a few gotchas that you need to be aware of.  The setup has changed from 8.04.  One issue is that the +CompatEnvVars is no longer used as it created a bug in 8.10 and you will have to enable the default-ssl site to get everything working.

First, log on to your server  Install Apache:
{{{
sudo apt-get install apache2
}}}
Change to the /etc/apache2/mods-available directory and look at the available modules.  Then change to the /etc/apache2/mods-enabled directory to see what modules are enabled:
{{{
cd /etc/apache2/mods-available
ls
cd /etc/apache2/mods-enabled
ls
}}}
Now, install and enable SSL:
{{{
sudo a2enmod ssl
sudo /etc/init.d/apache2 force-reload
}}}
Change to the default webserver directory, and create a simple web page:
{{{
cd /var/www
sudo nano index.html

Add the following content:
<html>
<head>
<title>Welcome to Your_Name’s Web Site</title>
</head>
<body>
<p>This is the best web site in the whole wide world.     </p>
</body>
</html>
}}}
Save and exit.  On your own local computer, open a tab or window for your web browser.  For the URL, enter:
{{{
http://IP_address_of_my_server
}}}
You should be able to view your web page.  Now, you’ll want to encrypt your site.    Create the server encryption keys:
{{{
cd /etc/apache2
sudo openssl genrsa -des3 -out server.key 1024
}}}
Use this set of keys to create a certificate request:
{{{
sudo openssl req -new -key server.key -out server.csr
}}}
When asked to input data, use your imagination to create something appropriate.  Be sure to write down your passphrase.  Use this request to create your self-signed certificate:
{{{
sudo openssl x509 -req -days 365 -in server.csr -signkey server.key -out server.crt
}}}
Install the key and certificate:
{{{
sudo cp server.crt /etc/ssl/certs/
sudo cp server.key /etc/ssl/private/
}}}
Open the “defaults” file for editing:
{{{
cd /etc/apache2/sites-available
sudo vim default-ssl
}}}
This file is basically set up but you will want to uncomment  the SSLOptions line and also change the SSLCertificate lines to reflect the location and name of your new information.
{{{
SSLEngine on
SSLOptions +FakeBasicAuth +ExportCertData +StrictRequire
SSLCertificateFile /etc/ssl/certs/server.crt
SSLCertificateKeyFile /etc/ssl/private/server.key
}}}
The port 443 is enabled when you use SSL so that is ready to go.
{{{
Enable the default SSL site:
sudo a2ensite default-ssl
}}}
If you do not enable the default-ssl you will get this error:
“ssl_error_rx_record_too_long apache”

Restart Apache.
{{{
sudo /etc/init.d/apache2 restart
}}}
That should do it.

{{item1{DNS 設定}}}
在 it66.kvm. 的 Zone 資料庫檔加入 CNAME 及 A 記錄
{{{
# cat /usr/local/etc/bind/it66.kvm.db 
$TTL 86400
@  IN SOA NS660.it66.kvm. admin.NS660.it66.kvm. (
     1      ; Serial number
     43200           ; Refresh timer - 12 hours
     3600            ; Retry timer - 1 hour
     7200            ; Expire timer - 2 hour
     86400           ; Minimum timer - 1 day
)                
@ IN      NS      NS660.it66.kvm.
NS660.it66.kvm. IN A 192.168.66.5
www IN CNAME AS995.it66.kvm.
AS995.it66.kvm. IN A 192.168.99.5
}}}

如在 Tiny Core 系統中設定 BIND, 記得重新開機前要執行以下命令
{{{
# filetool.sh -b
}}}
<<toBalaNotes "1">>
{{item1{Apache 設定}}}
''1. 建立網站目錄區''
{{{
$ sudo mkdir /home/www/it88
$ sudo nano /home/www/it88/index.html
<h1>my it88 web site</h1>

$ sudo mkdir /home/www/it66
$ sudo nano /home/www/it66/index.html
<h1>my it66 web site</h1>
}}}

''2.Virtual Host 設定''
{{{
$ sudo nano /etc/apache2/httpd.conf

<VirtualHost *:80>
        ServerName www.it66.kvm
        DocumentRoot /home/www/it66
</VirtualHost>

<VirtualHost *:80>
        ServerName www.it88.kvm
        DocumentRoot /home/www/it88
</VirtualHost>
}}}

''[註]'' ServerName 的名稱中最後不要加上 句點 (.)
<<toBalaNotes "2">>
///%1
//%/

///%2
//%/
''參考文章''
1. System: Analyzing Apache Log Files
http://www.the-art-of-web.com/system/logs/


{{item1{Common Log Format}}}
本文網址 : http://httpd.apache.org/docs/logs.html#combined

A typical configuration for the access log might look as follows.
{{{
LogFormat "%h %l %u %t \"%r\" %>s %b" common
CustomLog logs/access_log common
}}}
This defines the nickname common and associates it with a particular log format string. The format string consists of percent directives, each of which tell the server to log a particular piece of information. Literal characters may also be placed in the format string and will be copied directly into the log output. The quote character (") must be escaped by placing a backslash before it to prevent it from being interpreted as the end of the format string. The format string may also contain the special control characters "\n" for new-line and "\t" for tab.

The CustomLog directive sets up a new log file using the defined nickname. The filename for the access log is relative to the ServerRoot unless it begins with a slash.

The above configuration will write log entries in a format known as the Common Log Format (CLF). This standard format can be produced by many different web servers and read by many log analysis programs. The log file entries produced in CLF will look something like this:
{{{
127.0.0.1 - frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326
}}}
Each part of this log entry is described below.

''127.0.0.1 (%h)''
    This is the IP address of the client (remote host) which made the request to the server. If HostnameLookups is set to On, then the server will try to determine the hostname and log it in place of the IP address. However, this configuration is not recommended since it can significantly slow the server. Instead, it is best to use a log post-processor such as logresolve to determine the hostnames. The IP address reported here is not necessarily the address of the machine at which the user is sitting. If a proxy server exists between the user and the server, this address will be the address of the proxy, rather than the originating machine.

''- (%l)''
    The "hyphen" in the output indicates that the requested piece of information is not available. In this case, the information that is not available is the RFC 1413 identity of the client determined by identd on the clients machine. This information is highly unreliable and should almost never be used except on tightly controlled internal networks. Apache httpd will not even attempt to determine this information unless IdentityCheck is set to On.

''frank (%u)''
    This is the userid of the person requesting the document as determined by HTTP authentication. The same value is typically provided to CGI scripts in the REMOTE_USER environment variable. If the status code for the request (see below) is 401, then this value should not be trusted because the user is not yet authenticated. If the document is not password protected, this part will be "-" just like the previous one.

''[10/Oct/2000:13:55:36 -0700] (%t)''
    The time that the request was received. The format is:

    [day/month/year:hour:minute:second zone]
    day = 2*digit
    month = 3*letter
    year = 4*digit
    hour = 2*digit
    minute = 2*digit
    second = 2*digit
    zone = (`+' | `-') 4*digit

    It is possible to have the time displayed in another format by specifying %{format}t in the log format string, where format is either as in strftime(3) from the C standard library, or one of the supported special tokens. For details see the mod_log_config format strings.

''"GET /apache_pb.gif HTTP/1.0" (\"%r\")''
    The request line from the client is given in double quotes. The request line contains a great deal of useful information. First, the method used by the client is GET. Second, the client requested the resource /apache_pb.gif, and third, the client used the protocol HTTP/1.0. It is also possible to log one or more parts of the request line independently. For example, the format string "%m %U%q %H" will log the method, path, query-string, and protocol, resulting in exactly the same output as "%r".

''200 (%>s)''
    This is the status code that the server sends back to the client. This information is very valuable, because it reveals whether the request resulted in a successful response (codes beginning in 2), a redirection (codes beginning in 3), an error caused by the client (codes beginning in 4), or an error in the server (codes beginning in 5). The full list of possible status codes can be found in the HTTP specification (RFC2616 section 10).

''2326 (%b)''
    The last part indicates the size of the object returned to the client, not including the response headers. If no content was returned to the client, this value will be "-". To log "0" for no content, use %B instead.

{{item1{Combined Log Format}}}

Another commonly used format string is called the Combined Log Format. It can be used as follows.
{{{
LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\"" combined
CustomLog log/access_log combined
}}}
This format is exactly the same as the Common Log Format, with the addition of two more fields. Each of the additional fields uses the percent-directive %{header}i, where header can be any HTTP request header. The access log under this format will look like:
{{{
127.0.0.1 - frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326 "http://www.example.com/start.html" "Mozilla/4.08 [en] (Win98; I ;Nav)"
}}}
The additional fields are:
{{{
"http://www.example.com/start.html" (\"%{Referer}i\")
    The "Referer" (sic) HTTP request header. This gives the site that the client reports having been referred from. (This should be the page that links to or includes /apache_pb.gif).

"Mozilla/4.08 [en] (Win98; I ;Nav)" (\"%{User-agent}i\")
    The User-Agent HTTP request header. This is the identifying information that the client browser reports about itself. 
}}}
<<toBalaNotes "1">>

///%1
//%/
Shell In A Box implements a web server that can export arbitrary command line tools to a web based terminal emulator. 

官網 : http://code.google.com/p/shellinabox/

{{item1{Ajax 終端機 : shellinabox}}}

''1. 安裝 shellinabox''
{{{
$ wget http://shellinabox.googlecode.com/files/shellinabox_2.10-1_i386.deb
--2010-09-24 23:38:19--  http://shellinabox.googlecode.com/files/shellinabox_2.10-1_i386.deb
正在查找主機 shellinabox.googlecode.com... 72.14.203.82
正在連接 shellinabox.googlecode.com|72.14.203.82|:80... 連上了。
已送出 HTTP 要求,正在等候回應... 200 OK
長度: 124070 (121K) [application/x-archive application/x-debian-package]
Saving to: `shellinabox_2.10-1_i386.deb'

100%[======================================>] 124,070      110K/s   in 1.1s

2010-09-24 23:38:21 (110 KB/s) - `shellinabox_2.10-1_i386.deb' saved [124070/124070]

$ dpkg -i shellinabox_2.10-1_i386.deb
dpkg: 該操作需要超級使用者權限

$ sudo dpkg -i shellinabox_2.10-1_i386.deb
選取了原先未被選取的套件 shellinabox。
(正在讀取資料庫 ... 系統目前共安裝了 44091 個檔案和目錄。)
正在解開 shellinabox (從 shellinabox_2.10-1_i386.deb)...
正在設定 shellinabox (2.10-1) ...
 * Starting Shell In A Box Daemon shellinabox                            [ OK ]

正在進行 ureadahead 的觸發程式 ...
ureadahead will be reprofiled on next reboot
正在進行 man-db 的觸發程式 ...
}}}

''2. 使用 shellinabox (內定 Port 為 4200)''

[img[img/shellinabox01.png]]

[img[img/shellinabox02.png]]

''[註]'' 無法輸入中文, 可顯示中文

{{item1{取消 4200 port 連接, 只接受 80 port (http) 連接}}}

''1. 修改 /etc/default/shellinabox 設定檔''
{{{
# Should shellinaboxd start automatically
SHELLINABOX_DAEMON_START=1

# TCP port that shellinaboxd's webserver listens on
SHELLINABOX_PORT=80                      # 將 4200 改成 80

# Parameters that are managed by the system and usually should not need
# changing:
# SHELLINABOX_DATADIR=/var/lib/shellinabox
# SHELLINABOX_USER=shellinabox
# SHELLINABOX_GROUP=shellinabox

# Any optional arguments (e.g. extra service definitions)
# We disable beeps, as there have been reports of the VLC plugin crashing
# Firefox on Linux/x86_64.
SHELLINABOX_ARGS="--no-beep --disable-ssl"          # 加入 --disable-ssl 這參數 (要有雙引號), 取消執行 ssl 功能, 這樣可增加執行速度
}}}

''2. 重新啟動 shellinabox''
{{{
$ sudo invoke-rc.d shellinabox restart
}}}

{{item1{取消 4200 port 連接, 只接受 443 port (https) 連接}}}

''1. 修改 /etc/default/shellinabox 設定檔''
{{{
$ sudo nano /etc/default/shellinabox
# Should shellinaboxd start automatically
SHELLINABOX_DAEMON_START=1

# TCP port that shellinaboxd's webserver listens on
SHELLINABOX_PORT=443                        # 將 4200 改成 443

# Parameters that are managed by the system and usually should not need
# changing:
# SHELLINABOX_DATADIR=/var/lib/shellinabox
# SHELLINABOX_USER=shellinabox
# SHELLINABOX_GROUP=shellinabox

# Any optional arguments (e.g. extra service definitions)
# We disable beeps, as there have been reports of the VLC plugin crashing
# Firefox on Linux/x86_64.
SHELLINABOX_ARGS=--no-beep
}}}

''2. 重新啟動 shellinabox''
{{{
$ sudo invoke-rc.d shellinabox restart
}}}

<<toBalaNotes "1">>

///%1
//%/
本文網址 : http://www.ithome.com.tw/itadm/article.php?c=78367

財政部財政資訊中心加上臺灣5地區的國稅局導入OpenOffice辦公軟體,目前共計8千多名員工使用,更預計在今年大幅移除原有的微軟Office軟體,只會保留5%的Office軟體授權,用於轉換對外交換的文件檔案。

在2011年時,財政資訊中心和5地區國稅局(含臺北國稅局、高雄國稅局、臺灣省北區國稅局、臺灣省中區國稅局和臺灣省南區國稅局)接下了一個政府招案,這些單位將會有密切的合作,自然文件檔案交換頻率也會增加,同時,又面臨機關內部Office軟體版本過舊,大都是微軟舊版的Office 2000或Office 2003,正好是辦公軟體的升級時機。

但是考慮到全部換新的Office軟體授權費用並不便宜,Office 2007以上版本的使用介面也和過去大不相同,財政資訊中心就開始考慮,既然使用者都要適應新介面,那不如趁這個機會導入開放格式的辦公軟體。

因為財政部屬公家機關,大部分屬敏感資料,所以辦公軟體在線上操作較有洩漏資料的疑慮,在這個考量之下,像Google雲端作業軟體自然就不在考慮範圍內。

負責導入這次專案的財政部財政資訊中心助理設計師曾政龍表示,當時財政資訊中心想買斷授權,又希望不被軟體格式限制,因此同時評估了3個自由軟體方案,分別是LibreOffice、OpenOffice和EIOffice。

但實際比較過後,發現EIOffice不夠自由,輸出的文件檔並不是ODF的開放格式,而LibreOffice對使用者使用問題的支援較不足,有問題時沒辦法立即解決。最後,財政資訊中心才選擇OpenOffice作為替換微軟Office的選擇方案。

高層主管出席教育訓練帶頭導入
決定好軟體後,財政資訊中心從2011年11月起,就開始進行OpenOffice的一連串導入措施。

首先是舉辦財政資訊中心以及5區國稅局的OpenOffice內部說明會,但重要的是,每次有內部說明會,財政資訊中心主任蘇俊榮,以及各國稅局的高層主管都會親自出席,來支持這個專案導入。

此外,蘇俊榮更會在很多公開場合宣導Open Office的好處。在推廣期間,財政資訊中心和5區國稅局甚至共同舉辦了「OpenOffice.org學習成果文件競賽」,透過競賽讓使用者熟悉這套軟體。

曾政龍表示,這次的專案能夠導入成功,有很大原因來自於高階主管的支持與親身力行,帶領底下員工嘗試和過去不同的辦公作業軟體環境。 
>@@font-size:20pt;演講主題 : Linux 核心虛擬化技術@@
>
>@@font-size:20pt;主講者 : 小陳老師 (oc99.98@gmail.com)@@
>
>@@font-size:20pt;著作@@
>
>[img[img/ubuntukvm.jpg]]&nbsp;&nbsp;&nbsp;[img[img/fedorakvm.jpg]]

{{item1{@@font-size:14pt; 建立巢狀虛擬系統 (Nested VM)@@}}}

>文章網址 : http://linuxkvm.blogspot.tw/2012/05/nested-vm.html

{{item1{@@font-size:14pt; 認識 Linux 核心虛擬化技術@@}}}

@@font-size:14pt;<<toBalaSWF2 "movie/kvm001.swf" "860" "680" "1. 連接 Linux KVM 線上講義">>@@

>Linux KVM 研究室網址 : http://linuxkvm.blogspot.tw/

@@font-size:14pt;<<toBalaSWF2 "movie/kvm002.swf" "840" "660" "2. 確認 Linux KVM 安裝規格">>@@
{{{
$ sudo apt-get install cpu-checker
$ sudo kvm-ok
INFO: /dev/kvm does not exist
HINT:   sudo modprobe kvm_intel
INFO: Your CPU supports KVM extensions
KVM acceleration can be used

$ modinfo kvm
filename:       /lib/modules/3.2.0-44-generic/kernel/arch/x86/kvm/kvm.ko
license:        GPL
author:         Qumranet
srcversion:     F5F707818318FB4A6480A90
depends:        
intree:         Y
vermagic:       3.2.0-44-generic SMP mod_unload modversions 
parm:           min_timer_period_us:uint
parm:           oos_shadow:bool
parm:           ignore_msrs:bool
parm:           allow_unsafe_assigned_interrupts:Enable device assignment on platforms without interrupt remapping support. (bool)

}}}
@@font-size:14pt;<<toBalaSWF2 "movie/kvm003.swf" "840" "660" "3. 安裝 Linux KVM">>@@
{{{
$ sudo apt-get install qemu-kvm
$ kvm --version
QEMU emulator version 1.0 (qemu-kvm-1.0), Copyright (c) 2003-2008 Fabrice Bellard
}}}
''Linux KVM 系統架構圖''

[img[img/kvm/kvm_qemu01.png]]
<<toBalaNotes "1">>
{{item1{@@font-size:14pt; 使用 Linux 核心虛擬化技術@@}}}

@@font-size:14pt;<<toBalaSWF2 "movie/kvm004.swf" "840" "660" "1. 第一次啟動 Linux KVM 虛擬電腦 - Tiny Server Core">>@@
{{{
$ wget http://tobala.net/download/tsc32.iso
$ kvm -m 128 -cdrom tsc32.iso -boot d
}}}
QEMU 自閉網路架構 : http://wiki.qemu.org/Documentation/Networking

@@font-size:14pt;<<toBalaSWF2 "movie/kvm005.swf" "840" "660" "2. 第一次使用虛擬硬碟檔">>@@
{{{
$ kvm-img create -f raw tsc32.img 30m
$ kvm -m 128 -cdrom tsc32.iso -hda tsc32.img -boot d

## 在 TSC 虛擬電腦中執行
$ sudo fdisk /dev/sda
$ sudo mkfs.ext4 /dev/sda1
$ sudo reboot

## 下載 nano 套件
$ tce-load -wi nano.tcz
}}}
<<toBalaNotes "2">>
{{item1{@@font-size:14pt; 建置 Linux KVM 網路系統@@}}}

@@font-size:14pt;<<toBalaSWF2 "movie/kvm006.swf" "840" "660" "1. 製作 Linux KVM 虛擬網卡 (TUN/TAP)">>@@
{{{
$ sudo apt-get install uml-utilities
$ sudo tunctl -t myn1 -u student
$ ifconfig myn1
myn1      Link encap:Ethernet  HWaddr 26:fc:72:15:da:c7  
          BROADCAST MULTICAST  MTU:1500  Metric:1
          RX packets:0 errors:0 dropped:0 overruns:0 frame:0
          TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:500 
          RX bytes:0 (0.0 B)  TX bytes:0 (0.0 B)

$ sudo ifconfig myn1 172.16.10.10 up
$ ifconfig myn1
myn1      Link encap:Ethernet  HWaddr 26:fc:72:15:da:c7  
          inet addr:172.16.10.10  Bcast:172.16.255.255  Mask:255.255.0.0
          UP BROADCAST MULTICAST  MTU:1500  Metric:1
          RX packets:0 errors:0 dropped:0 overruns:0 frame:0
          TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:500 
          RX bytes:0 (0.0 B)  TX bytes:0 (0.0 B)

$ ping 172.16.10.10
$ sudo tunctl -d myn1
$ ifconfig myn1
myn1: error fetching interface information: Device not found
}}}

@@font-size:14pt;<<toBalaSWF2 "movie/kvm007.swf" "840" "660" "2. 製作虛擬集線器 (Switch HUB)">>@@
{{{
## 製作 Switch HUB 的機盒
$ sudo brctl addbr myhub
$ sudo ifconfig myhub 172.16.20.254 netmask 255.255.255.0 up
$ brctl show myhub
bridge name	bridge id		STP enabled	interfaces
myhub		8000.000000000000	no		

## 製作 Switch HUB 的 Port
$ sudo tunctl -t mp1 -u student
$ sudo tunctl -t mp2 -u student
$ sudo ifconfig mp1 up
$ sudo ifconfig mp2 up
$ ifconfig -a
mp1       Link encap:Ethernet  HWaddr 8e:61:ae:ee:79:54  
          BROADCAST MULTICAST  MTU:1500  Metric:1
          RX packets:0 errors:0 dropped:0 overruns:0 frame:0
          TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:500 
          RX bytes:0 (0.0 B)  TX bytes:0 (0.0 B)

mp2       Link encap:Ethernet  HWaddr d6:f1:02:a5:91:5d  
          BROADCAST MULTICAST  MTU:1500  Metric:1
          RX packets:0 errors:0 dropped:0 overruns:0 frame:0
          TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:500 
          RX bytes:0 (0.0 B)  TX bytes:0 (0.0 B)

myhub     Link encap:Ethernet  HWaddr 9e:ff:5d:55:33:f3  
          inet addr:172.16.20.254  Bcast:172.16.20.255  Mask:255.255.255.0
          inet6 addr: fe80::9cff:5dff:fe55:33f3/64 Scope:Link
          UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
          RX packets:0 errors:0 dropped:0 overruns:0 frame:0
          TX packets:38 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:0 
          RX bytes:0 (0.0 B)  TX bytes:7526 (7.5 KB)

$ sudo brctl addif myhub mp1
$ sudo brctl addif myhub mp2
$ brctl show myhub
bridge name	bridge id		STP enabled	interfaces
myhub		8000.8e61aeee7954	no		mp1
							mp2

}}}

''移除自製 Switch HUB''
{{{
$ sudo brctl delbr myhub
$ sudo ifconfig myhub down
$ sudo brctl delbr myhub
}}}

@@font-size:14pt;<<toBalaSWF2 "movie/kvm008.swf" "840" "660" "3. 連上虛擬集線器 (Switch HUB)">>@@
{{{
$ sudo dnsmasq -u student --bind-interfaces --dhcp-leasefile=/tmp/dnsmasq172.log --conf-file= --listen-address 172.16.20.254 --dhcp-range 172.16.20.100,172.16.20.110 --dhcp-lease-max=10 --except-interface=lo --interface=myhub
$ kvm -name "tsc3210" -m 128 -cdrom tsc32.iso -net nic,macaddr=52:54:72:16:20:10 -net tap,ifname=mp1,script=no,downscript=no -boot d &
$ kvm -name "tsc3211" -m 128 -cdrom tsc32.iso -net nic,macaddr=52:54:72:16:20:11 -net tap,ifname=mp2,script=no,downscript=no -boot d &
}}}

@@font-size:14pt;<<toBalaSWF2 "movie/kvm009.swf" "840" "660" "4. 啟動 NAT 功能">>@@
{{{
$ echo 1 | sudo tee /proc/sys/net/ipv4/ip_forward

$ sudo iptables -t nat -A POSTROUTING -s 172.16.20.0/24 -j MASQUERADE
}}}
<<toBalaNotes "3">>
{{item1{@@font-size:14pt; Linux KVM 魔法書實務應用@@}}}

@@font-size:14pt;<<toBalaSWF2 "movie/kvm012.swf" "840" "660" "1. 認識 Linux KVM 魔法書">>@@

[img[img/hadoop/hadoop.png]]
{{op1{Hadoop 分散檔案系統定義檔}}}
{{{
$ cat ~/kvmhdfs1.0/conf/hadoop104.xml 
<?xml version="1.0"?>
<network>

  <switch-hub name="SH100">
     <!-- 設定 Bare-Metal 主機可直接與此網段其他虛擬主機連接的 IP 位址 (可以不設定) -->
     <ip>172.16.100.1/24</ip>
     <ports>
       <!-- 此 Port 作為 Switch-Hub 裝置本身使用 --> 
       <tap name="SH100-NET" mac="02:01:00:00:00:00"/>

       <!-- 這些 Port 提供給虛擬主機使用 -->
       <tap name="SH100P1" mac="02:01:00:00:00:01"/>
       <tap name="SH100P2" mac="02:01:00:00:00:02"/>
       <tap name="SH100P3" mac="02:01:00:00:00:03"/>
       <tap name="SH100P4" mac="02:01:00:00:00:04"/>

       <!-- 這些 Port 提供給路由主機使用 -->
       <tap name="SH100R1" mac="02:01:00:00:00:f0"/>
       <tap name="SH100R2" mac="02:01:00:00:00:f1"/>
     </ports>
  </switch-hub>
                                           :
  <client name="SH100" osname="TSCKERNEL" ostype="32">
     <vm name="NN" mem="768" tap="SH100P1" mac="02:01:72:16:10:10">
       <!-- 設定虛擬主機的 IP, Default Gateway 及 DNS Server -->
       <ipv4>172.16.100.10:255.255.255.0:172.16.100.254:168.95.1.1</ipv4>
       <disk name="vmdisk/NN.qcow2"/>
       <superuser>true</superuser>
     </vm>
     <vm name="SN" mem="512" tap="SH100P2" mac="02:01:72:16:10:11">
       <ipv4>172.16.100.11:255.255.255.0:172.16.100.254:168.95.1.1</ipv4>
       <disk name="vmdisk/SN.qcow2"/>
       <superuser>true</superuser>
     </vm>
     <vm name="DN01" mem="512" tap="SH100P3" mac="02:01:72:16:10:12">
       <ipv4>172.16.100.12:255.255.255.0:172.16.100.254:168.95.1.1</ipv4>
       <disk name="vmdisk/DN01.qcow2"/>
       <superuser>true</superuser>
     </vm>
     <vm name="DN02" mem="512" tap="SH100P4" mac="02:01:72:16:10:13">
       <ipv4>172.16.100.13:255.255.255.0:172.16.100.254:168.95.1.1</ipv4>
       <disk name="vmdisk/DN02.qcow2"/>
       <superuser>true</superuser>
     </vm>
  </client>
                                        :
  <router name="R100.200">
     <vm name="R100.200" mem="128" osname="TSCKERNEL" ostype="32">
       <uplink port="SH100R1" mac="02:01:72:16:10:20">
         <ipv4>172.16.100.200:255.255.255.0:172.16.100.254:168.95.1.1</ipv4>
       </uplink>
       <nextlink port="SH200R1" mac="02:02:72:16:20:ff">
         <ipv4>172.16.200.254:255.255.255.0::</ipv4>
       </nextlink>
       <disk name="vmdisk/R100.200.img"/>
     </vm>  
  </router>

  <router name="RBR0.100">
     <vm name="NAT100" mem="128" osname="TSCKERNEL" ostype="32">
       <uplink sw="br0" port="NAT100" mac="02:01:72:16:10:22">
         <!-- 設定此網路介面使用 DHCP 取得 TCP/IP 設定 -->
         <ipv4>dhcp</ipv4>
       </uplink>
       <nextlink port="SH100R2" mac="02:02:72:16:10:ff">
         <ipv4>172.16.100.254:255.255.255.0::</ipv4>
       </nextlink>
       <!-- 啟動 NAT 功能 -->
       <nat>true</nat>
       <disk name="vmdisk/NAT100.img"/>
     </vm>
  </router>

  <!-- 只提供 raw 虛擬硬碟格式 -->
  <vm-disk>
    <disk type="raw" name="vmdisk/R100.200.img" size="20m"/>
    <disk type="raw" name="vmdisk/NAT100.img" size="20m"/>
  </vm-disk>

</network>
}}}
@@font-size:14pt;<<toBalaSWF2 "movie/kvm012.swf" "840" "660" "2. 安裝 Hadoop 分散檔案系統">>@@
{{{
$ wget http://tbala.net/download/kvmhdfs1.0.zip
$ unzip kvmhdfs1.0.zip 
$ cd kvmhdfs1.0/
$ sudo ./hdfs.sh 
$ sudo apt-get install libxml2-utils libxml-xpath-perl 
$ exit
}}}
@@font-size:14pt;<<toBalaSWF2 "movie/kvm013.swf" "840" "660" "3. 啟動 Hadoop 分散檔案系統">>@@
{{{
$ cd kvmhdfs1.0/
$ sudo ./hdfs.sh

### 在 NN 虛擬主機, 執行以下命令
$ ./checkHDFS.sh 
$ hadoop dfsadmin -report  
}}}
@@font-size:14pt;<<toBalaSWF2 "movie/kvm014.swf" "840" "660" "4. 使用 Hadoop 分散檔案系統">>@@
{{{
HDFS:mapreduce> sudo nano /etc/hosts
127.0.0.1	localhost
127.0.1.1	UB641
172.16.100.10   NN
172.16.100.11   SN
172.16.100.12   DN01
172.16.100.13   DN02

# The following lines are desirable for IPv6 capable hosts
::1     ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters

HDFS:mapreduce> hadoop dfs -mkdir inputlog
HDFS:mapreduce> hadoop dfs -put ge.myruby.net  inputlog
HDFS:mapreduce> hadoop dfs -put wiki.myruby.net  inputlog

HDFS:mapreduce> javac -d MyLog/   ApacheLog*.java
HDFS:mapreduce> jar -cvf apachelog.jar -C  MyLog/  . 

HDFS:mapreduce> hadoop jar apachelog.jar ApacheLog  inputlog/  output/
HDFS:mapreduce> hadoop dfs -ls /user/root/output
HDFS:mapreduce> hadoop dfs -cat output/part-r-00000

### 在 NN 虛擬主機, 執行以下命令
# stop-all.sh                                     
stopping jobtracker                                                             
DN01: stopping tasktracker                                                      
DN02: stopping tasktracker                                                      
stopping namenode                                                               
DN02: stopping datanode                                                         
DN01: stopping datanode                                                         
SN: stopping secondarynamenode      

$ byeall
}}}
<<toBalaNotes "4">>


///%1
//%/

///%2
//%/

///%3
//%/

///%4
//%/

在文章中使用 [巨集] 可獲得更多資訊運作功能, 例如: 以標籤來排序文章, 將過濾過文章寫入外部文字檔

巨集使用格式如下 : 
{{{
<<macroName [parameters]>>
}}}

<<toBalaNotes "macro">>


///%macro
//%/
|!巨集名稱|!說明|!語法|
|allTags|List all Tags in a Tiddler|{{{<}}}{{{<allTags>>}}}|
|closeAll|Close all displayed Tiddlers|{{{<}}}{{{<closeAll>>}}}|
|list all|List all Tiddlers in a Tiddler|{{{<}}}{{{<list all>>}}}|
|list missing|List all Missing Tiddlers in a Tiddler|{{{<}}}{{{<list missing>>}}}|
|list orphans|List all orphaned Tiddlers in a Tiddler|{{{<}}}{{{<list orphans>>}}}|
|newJournal|Create new date & Time stamped Tiddler|{{{<}}}{{{<newJournal>>}}}|
|newTiddler|Create new Tiddler|{{{<}}}{{{<newTiddler>>}}}|
|permaview|URL link for all open Tiddlers|{{{<}}}{{{<permaview>>}}}|
|saveChanges |Save all ~TiddlyWiki changes|{{{<}}}{{{<saveChanges>>}}}|
|search|Display a Search box|{{{<}}}{{{<search>>}}}|
|slider|Display a Slider|{{{<}}}{{{<slider sliderID sliderTiddler sliderLabel>>}}}|
|tabs|Display Tabbed content|{{{<}}}{{{<tabs identifier tabLabel tabName Tiddler>>}}}|
|tag|Display a Tag ~PopUp|{{{<}}}{{{<tag tagName>>}}}|
|tiddler|Display inline contents of a Tiddler|{{{<}}}{{{<tiddler Tiddler>>}}}|
|timeline|Display Timeline in a Tiddler|{{{<}}}{{{<timeline>>}}}|
|today|Display Today's Date|{{{<}}}{{{<today>>}}}|
|version|Display ~TiddlyWiki's version|{{{<}}}{{{<version>>}}}|

<<toBalaNotes "buildin">>


///%buildin
//%/
{{item1{newTiddler 巨集使用格式說明}}}
{{{

<<newTiddler  label:"新增網頁"  tag:"HTML"   template:"HTMLEditTemplate"  title:"新增網頁">>

label:"新增網頁"  --->  按鈕名稱
tag:"HTML"  ---->  新文章的標籤
template:"HTMLEditTemplate"  ---->  新文章建立使用的 Template
title:"新增網頁"  ----> 新文章的 Title
}}}

{{op1{範例}}}
新增一篇標題為 Ajax 的文章, 使用格式如下 :
{{{
<<newTiddler label:"新增 Ajax 文章" title:"Ajax" tag:"Ajax" text:"/*{{{*/  /*}}}*/">>
}}}

__{{op1{實作範例}}}__

<<newTiddler label:"新增 Ajax 文章" title:"Ajax" tag:"Ajax" text:"/*{{{*/  /*}}}*/">>
{{{

include 巨集的使用格式說明

載入指定的 TiddlyWiki 網頁, 而不顯示 include 巨集命令 (hide:true)
<<include "tBalaTech-DSL1209.html" hide: true>>

顯示載入TiddlyWiki 網頁中的 Tiddler
<<tiddler '何謂 permaview ?'>>

}}}

!使用說明
{{{
1. 規劃標籤名
2. 根據 "標籤名" 產生 [編輯] 與 [顯示] 的 Template

    例如 : 標籤名為 HTML, 那麼二個 Template 的名稱為 HTMLEditTemplate, HTMLViewTemplate,
              以後只要文章的標籤有 HTML, 便會自動套用這二個 Template

3. 將以下巨集命令加入 ToolBar 文章中

    <<newTiddler  label:"新增網頁"    tag:"HTML"     template:"HTMLEditTemplate"   title:"新增網頁">>
                              ^                ^                      ^                           ^
                         按鈕名稱        新文章的標籤      新文章第一次使用的 Template     新文章的 Title

    * 必須要安裝 TaggedTemplateTweak 這個插件, 才有以上功能

}}}

請點選以下按鈕, 測試 easyEdit 功能

<<newTiddler label:新增網頁 tag:"HTML" template:"HTMLEditTemplate" title:"新增網頁">>


!HTMLEditTemplate
{{{
<div class='toolbar' macro='toolbar +saveTiddler  -cancelTiddler deleteTiddler'></div>
<div class='title' macro='view title'></div>
<div class='editor' macro='edit title'></div>
<div macro='annotations'></div>
<div class='editor' macro='easyEdit text'></div>
                                      ^
                                 這是重點

<div class='editor' macro='edit tags'></div>
<div class='editorFooter'>
<span macro='message views.editor.tagPrompt'></span>
<span macro='tagChooser'></span>
</div>
}}}

!HTMLViewTemplate
{{{
<div class='toolbar' macro='toolbar closeTiddler closeOthers +easyEdit > fields syncing permalink references jump'></div>
                                                                    ^
                                                                這是重點
<div class='title' macro='view title'></div>
<div class='subtitle'><span macro='view modifier link'></span>, <span macro='view modified date'></span> (<span macro='message views.wikified.createdPrompt'></span> <span macro='view created date'></span>)</div>
<div class='tagged' macro='tags'></div>
<div class='viewer' macro='view text wikified'></div>
<div class='tagClear'></div>
}}}
''horizontal:''
{{{
* menu #1
** [[item #1-1]]
** [[item #1-2]]
** [[item #1-3]]
* menu #2
** [[item #2-1]]
** [[item #2-2]]
** [[menu #2-3]]
* menu #3
** [[item #2-1]]
** [[item #2-2]]
** [[menu #2-3]]
<<dropMenu>>
}}}
* menu #1
** [[item #1-1]]
** [[item #1-2]]
** [[item #1-3]]
* menu #2
** [[item #2-1]]
** [[item #2-2]]
** [[menu #2-3]]
* menu #3
** [[item #2-1]]
** [[item #2-2]]
** [[menu #2-3]]
<<dropMenu>>

''vertical:''
{{{
* menu #1
** [[item #1-1]]
** [[item #1-2]]
** [[item #1-3]]
* menu #2
** [[item #2-1]]
** [[item #2-2]]
** [[menu #2-3]]
<<dropMenu vertical>>
}}}

* menu #1
** [[item #1-1]]
** [[item #1-2]]
** [[item #1-3]]
* menu #2
** [[item #2-1]]
** [[item #2-2]]
** [[menu #2-3]]
<<dropMenu vertical>>


/% %/
/***
|<html><a name="Top"/></html>''Name:''|PartTiddlerPlugin|
|''Version:''|1.0.9 (2007-07-14)|
|''Source:''|http://tiddlywiki.abego-software.de/#PartTiddlerPlugin|
|''Author:''|UdoBorkowski (ub [at] abego-software [dot] de)|
|''Licence:''|[[BSD open source license]]|
|''CoreVersion:''|2.1.3|
|''Browser:''|Firefox 1.0.4+; InternetExplorer 6.0|
!Table of Content<html><a name="TOC"/></html>
* <html><a href="javascript:;" onclick="window.scrollAnchorVisible('Description',null, event)">Description, Syntax</a></html>
* <html><a href="javascript:;" onclick="window.scrollAnchorVisible('Applications',null, event)">Applications</a></html>
** <html><a href="javascript:;" onclick="window.scrollAnchorVisible('LongTiddler',null, event)">Referring to Paragraphs of a Longer Tiddler</a></html>
** <html><a href="javascript:;" onclick="window.scrollAnchorVisible('Citation',null, event)">Citation Index</a></html>
** <html><a href="javascript:;" onclick="window.scrollAnchorVisible('TableCells',null, event)">Creating "multi-line" Table Cells</a></html>
** <html><a href="javascript:;" onclick="window.scrollAnchorVisible('Tabs',null, event)">Creating Tabs</a></html>
** <html><a href="javascript:;" onclick="window.scrollAnchorVisible('Sliders',null, event)">Using Sliders</a></html>
* <html><a href="javascript:;" onclick="window.scrollAnchorVisible('Revisions',null, event)">Revision History</a></html>
* <html><a href="javascript:;" onclick="window.scrollAnchorVisible('Code',null, event)">Code</a></html>
!Description<html><a name="Description"/></html>
With the {{{<part aPartName> ... </part>}}} feature you can structure your tiddler text into separate (named) parts. 
Each part can be referenced as a "normal" tiddler, using the "//tiddlerName//''/''//partName//" syntax (e.g. "About/Features").  E.g. you may create links to the parts (e.g. {{{[[Quotes/BAX95]]}}} or {{{[[Hobbies|AboutMe/Hobbies]]}}}), use it in {{{<<tiddler...>>}}} or {{{<<tabs...>>}}} macros etc.


''Syntax:'' 
|>|''<part'' //partName// [''hidden''] ''>'' //any tiddler content// ''</part>''|
|//partName//|The name of the part. You may reference a part tiddler with the combined tiddler name "//nameOfContainerTiddler//''/''//partName//". <<br>>If you use a partName containing spaces you need to quote it (e.g. {{{"Major Overview"}}} or {{{[[Shortcut List]]}}}).|
|''hidden''|When defined the content of the part is not displayed in the container tiddler. But when the part is explicitly referenced (e.g. in a {{{<<tiddler...>>}}} macro or in a link) the part's content is displayed.|
|<html><i>any&nbsp;tiddler&nbsp;content</i></html>|<html>The content of the part.<br>A part can have any content that a "normal" tiddler may have, e.g. you may use all the formattings and macros defined.</html>|
|>|~~Syntax formatting: Keywords in ''bold'', optional parts in [...]. 'or' means that exactly one of the two alternatives must exist.~~|
<html><sub><a href="javascript:;" onclick="window.scrollAnchorVisible('Top',null, event)">[Top]</sub></a></html>

!Applications<html><a name="Applications"/></html>
!!Referring to Paragraphs of a Longer Tiddler<html><a name="LongTiddler"/></html>
Assume you have written a long description in a tiddler and now you want to refer to the content of a certain paragraph in that tiddler (e.g. some definition.) Just wrap the text with a ''part'' block, give it a nice name, create a "pretty link" (like {{{[[Discussion Groups|Introduction/DiscussionGroups]]}}}) and you are done.

Notice this complements the approach to first writing a lot of small tiddlers and combine these tiddlers to one larger tiddler in a second step (e.g. using the {{{<<tiddler...>>}}} macro). Using the ''part'' feature you can first write a "classic" (longer) text that can be read "from top to bottom" and later "reuse" parts of this text for some more "non-linear" reading.

<html><sub><a href="javascript:;" onclick="window.scrollAnchorVisible('Top',null, event)">[Top]</sub></a></html>

!!Citation Index<html><a name="Citation"/></html>
Create a tiddler "Citations" that contains your "citations". 
Wrap every citation with a part and a proper name. 

''Example''
{{{
<part BAX98>Baxter, Ira D. et al: //Clone Detection Using Abstract Syntax Trees.// 
in //Proc. ICSM//, 1998.</part>

<part BEL02>Bellon, Stefan: //Vergleich von Techniken zur Erkennung duplizierten Quellcodes.// 
Thesis, Uni Stuttgart, 2002.</part>

<part DUC99>Ducasse, Stéfane et al: //A Language Independent Approach for Detecting Duplicated Code.// 
in //Proc. ICSM//, 1999.</part>
}}}

You may now "cite" them just by using a pretty link like {{{[[Citations/BAX98]]}}} or even more pretty, like this {{{[[BAX98|Citations/BAX98]]}}}.

<html><sub><a href="javascript:;" onclick="window.scrollAnchorVisible('Top',null, event)">[Top]</sub></a></html>

!!Creating "multi-line" Table Cells<html><a name="TableCells"/></html>
You may have noticed that it is hard to create table cells with "multi-line" content. E.g. if you want to create a bullet list inside a table cell you cannot just write the bullet list
{{{
* Item 1
* Item 2
* Item 3
}}}
into a table cell (i.e. between the | ... | bars) because every bullet item must start in a new line but all cells of a table row must be in one line.

Using the ''part'' feature this problem can be solved. Just create a hidden part that contains the cells content and use a {{{<<tiddler >>}}} macro to include its content in the table's cell.

''Example''
{{{
|!Subject|!Items|
|subject1|<<tiddler ./Cell1>>|
|subject2|<<tiddler ./Cell2>>|

<part Cell1 hidden>
* Item 1
* Item 2
* Item 3
</part>
...
}}}

Notice that inside the {{{<<tiddler ...>>}}} macro you may refer to the "current tiddler" using the ".".

BTW: The same approach can be used to create bullet lists with items that contain more than one line.

<html><sub><a href="javascript:;" onclick="window.scrollAnchorVisible('Top',null, event)">[Top]</sub></a></html>

!!Creating Tabs<html><a name="Tabs"/></html>
The built-in {{{<<tabs ...>>}}} macro requires that you define an additional tiddler for every tab it displays. When you want to have "nested" tabs you need to define a tiddler for the "main tab" and one for every tab it contains. I.e. the definition of a set of tabs that is visually displayed at one place is distributed across multiple tiddlers.

With the ''part'' feature you can put the complete definition in one tiddler, making it easier to keep an overview and maintain the tab sets.

''Example''
The standard tabs at the sidebar are defined by the following eight tiddlers:
* SideBarTabs
* TabAll
* TabMore
* TabMoreMissing
* TabMoreOrphans
* TabMoreShadowed
* TabTags
* TabTimeline

Instead of these eight tiddlers one could define the following SideBarTabs tiddler that uses the ''part'' feature:
{{{
<<tabs txtMainTab 
    Timeline Timeline SideBarTabs/Timeline 
    All 'All tiddlers' SideBarTabs/All 
    Tags 'All tags' SideBarTabs/Tags 
    More 'More lists' SideBarTabs/More>>
<part Timeline hidden><<timeline>></part>
<part All hidden><<list all>></part>
<part Tags hidden><<allTags>></part>
<part More hidden><<tabs txtMoreTab 
    Missing 'Missing tiddlers' SideBarTabs/Missing 
    Orphans 'Orphaned tiddlers' SideBarTabs/Orphans 
    Shadowed 'Shadowed tiddlers' SideBarTabs/Shadowed>></part>
<part Missing hidden><<list missing>></part>
<part Orphans hidden><<list orphans>></part>
<part Shadowed hidden><<list shadowed>></part>
}}}

Notice that you can easily "overwrite" individual parts in separate tiddlers that have the full name of the part.

E.g. if you don't like the classic timeline tab but only want to see the 100 most recent tiddlers you could create a tiddler "~SideBarTabs/Timeline" with the following content:
{{{
<<forEachTiddler 
		sortBy 'tiddler.modified' descending 
		write '(index < 100) ? "* [["+tiddler.title+"]]\n":""'>>
}}}
<html><sub><a href="javascript:;" onclick="window.scrollAnchorVisible('Top',null, event)">[Top]</sub></a></html>

!!Using Sliders<html><a name="Sliders"/></html>
Very similar to the built-in {{{<<tabs ...>>}}} macro (see above) the {{{<<slider ...>>}}} macro requires that you define an additional tiddler that holds the content "to be slid". You can avoid creating this extra tiddler by using the ''part'' feature.

''Example''
In a tiddler "About" we may use the slider to show some details that are documented in the tiddler's "Details" part.
{{{
...
<<slider chkAboutDetails About/Details details "Click here to see more details">>
<part Details hidden>
To give you a better overview ...
</part>
...
}}}

Notice that putting the content of the slider into the slider's tiddler also has an extra benefit: When you decide you need to edit the content of the slider you can just doubleclick the content, the tiddler opens for editing and you can directly start editing the content (in the part section). In the "old" approach you would doubleclick the tiddler, see that the slider is using tiddler X, have to look for the tiddler X and can finally open it for editing. So using the ''part'' approach results in a much shorter workflow.

<html><sub><a href="javascript:;" onclick="window.scrollAnchorVisible('Top',null, event)">[Top]</sub></a></html>

!Revision history<html><a name="Revisions"/></html>
* v1.0.9 (2007-07-14)
** Bugfix: Error when using the SideBarTabs example and switching between "More" and "Shadow". Thanks to cmari for reporting the issue.
* v1.0.8 (2007-06-16)
** Speeding up display of tiddlers containing multiple part definitions. Thanks to Paco Rivière for reporting the issue.
** Support "./partName" syntax inside <<tabs ...>> macro
* v1.0.7 (2007-03-07)
** Bugfix: <<tiddler "./partName">> does not always render correctly after a refresh (e.g. like it happens when using the "Include" plugin). Thanks to Morris Gray for reporting the bug.
* v1.0.6 (2006-11-07)
** Bugfix: cannot edit tiddler when UploadPlugin by Bidix is installed. Thanks to José Luis González Castro for reporting the bug.
* v1.0.5 (2006-03-02)
** Bugfix: Example with multi-line table cells does not work in IE6. Thanks to Paulo Soares for reporting the bug.
* v1.0.4 (2006-02-28)
** Bugfix: Shadow tiddlers cannot be edited (in TW 2.0.6). Thanks to Torsten Vanek for reporting the bug.
* v1.0.3 (2006-02-26)
** Adapt code to newly introduced Tiddler.prototype.isReadOnly() function (in TW 2.0.6). Thanks to Paulo Soares for reporting the problem.
* v1.0.2 (2006-02-05)
** Also allow other macros than the "tiddler" macro use the "." in the part reference (to refer to "this" tiddler)
* v1.0.1 (2006-01-27)
** Added Table of Content for plugin documentation. Thanks to RichCarrillo for suggesting.
** Bugfix: newReminder plugin does not work when PartTiddler is installed. Thanks to PauloSoares for reporting.
* v1.0.0 (2006-01-25)
** initial version

使用 toBalaSWF2 巨集, 來撥放指定的 Flash 檔, 命令格式如下 :
{{{
<<toBalaSWF2 "movie/infotree.swf" "820" "610">>
}}}

{{item1{實作範例 : winzip 的安裝}}}

<<toBalaSWF2 "movie/winzip.swf" "820" "610" "winzip 的安裝影片">>

<<toBalaNotes "flash">>


///%flash
//%/
{{item1{可縮放向量圖形 - Scalable Vector Graphics (SVG)}}}

SVG 是基於可擴展標記語言(XML),用於描述二維向量圖形的一種圖形格式。SVG 由 W3C 制定,是一個開放標準。

__{{op1{Inkscape SVG 圖形編輯器}}}__
官方網站 : http://www.inkscape.org/index.php
Inkscape 是一套以自由軟體方式發佈與使用的向量圖形編輯器,該套軟體的開發目標是成為一套強力的繪圖工具軟體,且能完全遵循與支援 XML、SVG、CSS 等開放性的標準格式,此外 Inkscape 同時也是一套跨平臺性的應用程式,針對不同的作業系統它都有能搭配對應執行的版本,如 Windows 版、Mac OS X 版、Linux 版、以及類 UNIX 版等作業系統,不過主要仍是以 Linux 為開發平臺。

Inkscape 開始於 2003 年,最初是向量繪圖編輯器:Sodipodi 的一個分支、分線發展(fork),當時的 Inkscape 尚未具備太多商業版向量編輯器才有的功效特點,不過卻廣泛適合各類的應用。Inkscape 雖支援 SVG、CSS 等標準,然仍尚未支援完全;其中較重要的SVG 濾鏡效果(SVG filter effect)、動畫、SVG 字型等都還未能實現。而自 2006 年起 Inkscape 的發展逐漸積極活躍,如今正不斷加入新的特點與機制功效。

__{{op1{教學影片}}}__
1. Inkscape 下載 <<toBalaSWF2 "movie/downloadinkscape.swf" "820" "610" "影片">>

2. Inkscape 安裝 <<toBalaSWF2 "movie/installinkscape.swf" "820" "610" "影片">>

3. 製作隨身版 Inkscape <<toBalaSWF2 "movie/portableinkscape.swf" "820" "610" "影片">>
&nbsp;&nbsp;&nbsp;&nbsp;這步驟只是將安裝完成的 Inkscape 目錄, 完整複製到隨身裝置 (USB 隨身碟), 然後將 Inkscape 從本機移除

4. 編輯 SVG 檔 <<toBalaSWF2 "movie/createsvg.swf" "820" "610" "影片">>
http://file.jdps.tcc.edu.tw/~t90042/inkscape/l5/l5.htm

5. 在 [學習筆記本] 的文章中顯示 SVG 圖檔 <<toBalaSWF2 "movie/tiddlersvg.swf" "820" "610" "影片">>
&nbsp;&nbsp;&nbsp;&nbsp;利用 Inkscape 產生的 SVG 檔, 請儲存至 toBalaKMKNotepad\img\svg 目錄中, 然後在[學習筆記本] 的文章中, 使用以下的 HTML 標籤來顯示 SVG 檔
{{{
<html>
<object data="img/svg/JavaSecurity.svg" TYPE="image/svg+xml" width="505" height="355" align="center">
  <a href="svg/JavaSecurity.svg">Java security Model</a>
</object>
</html>
}}}

__{{op1{參考網站}}}__
[[運用 Inkscape 所創作出來的線上圖集|http://inkscape.deviantart.com/favourites]]

<<toBalaNotes "svg">>


 

///%svg
//%/

|Jesse James Garrett[img[img/headshot_garrett.jpg]] |<< @@font-size:16pt;color:#00f;line-height:25pt;Ajax 由 HTML、JavaScript™ 技術、DHTML、XMLHttpRequest 物件和 DOM 組成,這一傑出的方法可以將笨拙的 Web 介面轉化成互動性的 Ajax 應用程式@@|
|borderless|k

{{item1{老技術,新技巧}}}

在談到 Ajax 時,實際上涉及到多種技術,要靈活地運用它必須深入瞭解這些不同的技術。好消息是您可能已經非常熟悉其中的大部分技術,更好的是這些技術都很容易學習,並不像完整的程式語言(如 Java 或 Ruby)那樣困難。
	
{{op1{Ajax 的定義}}}

順便說一下,Ajax 是 Asynchronous JavaScript and XML(以及 DHTML 等)的縮寫, 這個縮寫是 Adaptive Path 的 Jesse James Garrett 發明的

下面是 Ajax 應用程式所用到的基本技術:

    * HTML 用於建立 Web 表單並確認應用程式其他部分使用的欄位。
    * JavaScript 程式碼是執行 Ajax 應用程式的核心程式碼,幫助改進與伺服器應用程式的通訊。
    * DHTML 或 Dynamic HTML,用於動態更新表單。我們將使用 div、span 和其他動態 HTML 元素來標記 HTML。
    * 文件物件模型 DOM 用於(透過 JavaScript 程式碼)處理 HTML 結構和(某些情況下)伺服器返回的 XML。
    * XMLHttpRequest 通訊物件, 負責與後端伺服系統交換 XML, JSON, Text 等資料


{{item1{Ajax 程式撰寫與執行}}}
在學習筆記本中, 妳可以使用以下巨集, 來實作 Ajax 程式設計
{{{
<<toBalaAjax "ajax\AjaxCall.htm" "700" "450">>
}}}

__{{op1{參考文章}}}__
點選 [[toBalaAjax - Ajax 程式設計]] 或 [[toBalaAjax - 使用 XMLHTTP 物件]]


{{item1{Ajax Framework}}}
在學習筆記本中, 指定 jslib 目錄儲存 Ajax Framework (MooTools, jQuery, Dojo, Prototype,...)

__{{op1{jQuery}}}__
官方網站 : http://jquery.com/
jQuery 是一個快速又簡潔的JavaScript程式庫, 簡化了讓你在HTML文件裡面尋找DOM物件, 處理事件, 製作動畫, 和處理Ajax互動的過程。jQuery 將改變你撰寫 JavaScript 的方式。

參考網站 : [[優秀 jQuery 插件17個|http://ka-yue.com/blog/useful-jquery-plugin]]

__{{op1{MooTools}}}__
官方網站 : http://mootools.net/
MooTools is a compact, modular, Object-Oriented JavaScript framework designed for the intermediate to advanced JavaScript developer. It allows you to write powerful, flexible, and cross-browser code with its elegant, well documented, and coherent API.

__{{op1{Prototype}}}__
官方網站 : http://www.prototypejs.org/
Prototype is a JavaScript Framework that aims to ease development of dynamic web applications. Featuring a unique, easy-to-use toolkit for class-driven development and the nicest Ajax library around, Prototype is quickly becoming the codebase of choice for web application developers everywhere.

__{{op1{Dojo}}}__
官方網站 : http://dojotoolkit.org/
Dojo is an Open Source DHTML toolkit written in JavaScript. It builds on several contributed code bases (nWidgets, Burstlib, f(m)), which is why we refer to it sometimes as a "unified" toolkit. Dojo aims to solve some long-standing historical problems with DHTML which prevented mass adoption of dynamic web application development.

Dojo allows you to easily build dynamic capabilities into web pages and any other environment that supports JavaScript sanely. You can use the components that Dojo provides to make your web sites more usable, responsive, and functional. With Dojo you can build degradable user interfaces more easily, prototype interactive widgets quickly, and animate transitions. You can use the lower-level APIs and compatibility layers from Dojo to write portable JavaScript and simplify complex scripts. Dojo's event system, I/O APIs, and generic language enhancement form the basis of a powerful programming environment. You can use the Dojo build tools to write command-line unit-tests for your JavaScript code. The Dojo build process helps you optimize your JavaScript for deployment by grouping sets of files together and reuse those groups through "profiles".

Dojo does all of these things by layering capabilities onto a very small core which provides the package system and little else. When you write scripts with Dojo, you can include as little or as much of the available APIs as you need to suit your needs. Dojo provides multiple points of entry, interpreter independence, forward looking APIs, and focuses on reducing barriers to adoption.

<<toBalaNotes "ajaxframework">>



///%ajaxframework
//%/
&nbsp;
學習筆記本並沒有內建 J2SE 開發平台, 如要啟用 [學習筆記本] 的 Java 開發功能, 請自行依照以下操作步驟, 安裝 J2SE 開發平台

{{item1{建置 J2SE 開發平台}}}
1. 下載 JDK
&nbsp;&nbsp;&nbsp;&nbsp;請連接至 http://java.sun.com/javase/downloads/?intcmp=1281 下載 JDK

&nbsp;&nbsp;<<toBalaSWF2 "movie/JDK6_U2_Downlaod.swf" "820" "610" "教學影片 - 下載 JDK 1.6 Update 2">>

2. 安裝 JDK
&nbsp;&nbsp;&nbsp;&nbsp;- 執行安裝檔 (jdk-6u2-windows-i586-p.exe)
&nbsp;&nbsp;&nbsp;&nbsp;- JDK 版權宣告 (按下 Yes 即可)
&nbsp;&nbsp;&nbsp;&nbsp;- 選擇安裝項目(不要安裝 Demos, Java Sources, Java 2 Runtime Environment  這三個項目), 
&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;並自行指定 ''安裝目錄'', 例如 : d:\jdk1.6.0_02

&nbsp;&nbsp;<<toBalaSWF2 "movie/Install_JDKU2.swf" "820" "610" "教學影片 - 安裝 JDK 1.6 Update 2">>

3. 將安裝好的 JDK 目錄 (d:\jdk1.6.0_02) 複製至 ''toBalaKMKNotepad\java'' 目錄中

4. 修改 ''toBalaKMKNotepad'' 目錄中的 ''jdkconfig.bat'' 批次檔, 如下 :
{{{
set JAVA_HOME=%CD%\java\jdk1.6.0_02
set JRE_HOME=%CD%\java\jdk1.6.0_02
}}}

5. 從視窗系統的 [控制台] 將安裝好的 JDK 移除

6. 啟動 [學習筆記本], 點選 [[toBalaJava2 - 編譯及執行 java 程式]] 進行測試


{{item1{撰寫, 編譯及執行 Java 程式}}}

1. 點選工具列中的 ''[新增文章]'' 按鈕

2. 輸入文章 ''[抬頭]'', 然後在文字區輸入以下 ''[巨集]'' 命令, 最後點選 ''[完成]''
{{{
<<toBalaJava2 "java\scjp\hello.java">>
}}}

3. 點選 ''[程式編輯]'' 按鈕, 然後輸入以下 Java 程式
{{{
public class hello {
   public static void main(String []  argv){
      System.out.println("Hi Guru");
   }
}
}}}

4. 點選 ''[編譯程式]'' 及 ''[執行程式]'' 按鈕

<<toBalaNotes "java2">>


///%java2
//%/
{{item1{SQLite 簡介}}}
官方網站 : http://www.sqlite.org/

SQL 資料庫系統是程式設計師的好幫手,對於大量的資料處理上是絕對不可或缺的。不過,如果稍微少一點的資料呢?

比如說:
{{{
    * 你有幾萬筆的資料,以後也不太會增加。
    * 你的程式可能只有單機使用,
    * 就算可以上網,你也不想就為了這個程式來架一個資料庫。
    * 這個應用程式沒有多人一起使用的需求。 
}}}
諸如此類的狀況,也許您也曾經遇到過。通常您有幾種作法:
{{{
    * 累一點,在同一台機器上架個資料庫系統。
    * 找個小型的資料庫系統,像是 GDBM,Windows 的Registry,Access,或是乾脆用 CSV 格式的文字檔。 
}}}
第一種方法,對於使用者來說可能會是個大問題。因為不是每個使用者都懂得,而且願意架設一個資料庫系統在自己的電腦上。甚至他的電腦夠不夠力跑一個資料庫系統都是問題。第二種方法,是許多人常用的方式,尤其在 Windows 系統下面,許多人常常用Registry 來作為軟體的資料庫。不過,話說回來,少掉了 SQL 的幫助,很多資料處理的功能都要自己來作,對於程式設計師來說真的是很累。

其實,還有另外一種折衷的方案,就是使用 ~SQLite 這樣的嵌入式資料庫。對於程式設計師來說,使用這樣的資料庫,與使用一般的 SQL 資料庫差異不大。一般的 ~SQL92 語法都可以使用。而且不用架設任何系統起來,只要在編譯程式時把 ~SQLite 的程式庫一併編入。

而且 ~SQLite 的資料庫都是單一的檔案。 所以,要將軟體安裝到使用者的電腦上是再簡單不過的事情。

下面就來看看 ~SQLite 網站上對於這個嵌入式資料庫的介紹: ~SQLite 是一個很小的 C 語言程式庫。這個程式庫本身就完全包含資料庫引擎的功能,而且可以嵌入至其他程式中,完全不用額外的設定。其特性如下:
{{{
    * 支援 ACID (Atomic, Consistent, Isolated, Durable) 交易。
    * 零組態設定(Zero-configuration),無須管理者的設定及管理。
    * 支援大部分 ~SQL92 的語法。
    * 資料庫存在於一個單一的檔案中。
    * 資料庫系統所在機器的位元組順序( Byte order) 無關。
    * 支援大小至 2 terabytes (2^41 bytes)。
    * 極小的記憶體需求:小於 3 萬行的 C 語言程式碼。小於 250KB 的程式空間。
    * 大部分的資料庫操作皆快於一般流行的資料庫系統。
    * 簡單易用的 API。
    * 支援 TCL。也有其他語言的支援可用。
    * 註解詳細的程式碼,以及超過 90% 的測試。
    * 程式庫自己包含完整的功能,無須其他額外的程式或程式庫。
    * 程式碼版權為 public domain。任何用途皆可免費使用。 
}}}
在與其他的開發工具連結方面,~SQLite 幾乎可以在目前所有的主流開發工具,平台,程式語言上面使用:
{{{
    * BASIC
    * C/C++
    * ODBC
    * Java
    * JavaScript
    * .Net framework
    * Perl
    * PHP
    * Python
    * Ruby
}}}
而在作業系統支援上,除了目前大部分的 PC/NB 所使用的作業系統,如 Windows, Linux, ~FreeBSD, OS/2 ...等。在嵌入式系統上也有支援,如 Embedded 系統使用的 Linux, Win CE 及 Symbian 等等。 


{{item1{SQLite 實作說明}}}

You need to put all your sqlite shell commands in one file, say sqlitecmds.txt. This includes the dot commands and the SQL statements. So sqlitecmds.txt contains:
{{{
    .output testfile.txt
    select * from readcode where read_code glob 'G2*';
}}}
Then you need to put your DOS shell commands in another file, say readcode.bat. This file will contain the command to run sqlite and redirect its input to the file of sqlite commands above. So readcode.bat contains:
{{{
    cd c:\test\ReadCodes
    c:\test\Program\sqlite3 c:\test\ReadCodes\ReadCode.db < sqlitecmds.txt
}}}
Then you tell the DOS shell (actually cmd.exe) to run the commands in your readcode.bat file by typing the batch file name at the command prompt.
{{{
    C:\>readcode
}}}
<nowiki>
This will execute your batch file commands, which will run the sqlite shell program, which will read and execute the commands in the sqlite commands file, which will write its output to the file testfile.txt in the current directory (which will be C:\test\ReadCodes).
</nowiki>



''__@@font-size:14px;產生 t1 資料表@@__''

<<toBalaFile "db\createTable.txt">><<toBalaRun "tbsqlite.exe"  "db\test.db db\createTable.txt"  "產生 t1 資料表"  "">>

''__@@font-size:14px;新增記錄@@__''

<<toBalaFile "db\insert.txt">><<toBalaRun "tbsqlite.exe"  "db\test.db db\insert.txt"  "新增記錄"  "">>

''__@@font-size:14px;查詢記錄@@__''

<<toBalaFile "db\query.txt">><<toBalaRun "tbsqlite.exe"  "db\test.db db\query.txt"  "查詢記錄"  "">>

''__@@font-size:14px;刪除記錄@@__''

<<toBalaFile "db\delete.txt">><<toBalaRun "tbsqlite.exe"  "db\test.db db\delete.txt"  "刪除記錄"  "">>

''__@@font-size:14px;刪除 t1 資料表@@__''

<<toBalaFile "db\dropTable.txt">><<toBalaRun "tbsqlite.exe"  "db\test.db db\dropTable.txt"  "刪除 t1 資料表"  "">>



{{item1{tbsqlite.bat 批次檔}}}
{{{
@echo off

REM 取得這個 Batch File 的目錄
set toBalaPath=%~p0

REM 取得這個 Batch File 的磁碟代號
set toBalaDrv=%~d0

REM 轉換磁碟及目錄
call %toBalaDrv%
cd %toBalaPath%

REM 設定環境變數
REM -----------------
set PATH=.;%CD%\tools;%CD%\tools\cmdtools;%PATH%

REM 執行 sqlite3 命令
REM ------------------
if "%~x0" == ".bat" goto n1
echo ^<head^> >%0.htm
echo ^<meta http-equiv="Content-Type" content="text/html; charset=utf-8"^> >>%0.htm
echo ^</head^> >>%0.htm
echo ^<body^> >>%0.htm
echo sqlite3  -html -header %1 ^< %2 >>%0.htm
echo ^<hr^> >>%0.htm 
echo ^<table style='font-size:12px' border="1" align="left"^> >>%0.htm
goto n2

:n1
echo 執行中, 請稍待 (檔案 : sqlite3 %1 ^< %2) 
echo. >%0.htm

:n2
sqlite3 -html -header %1  < %2  >> %0.htm 2>&1

REM 處理執行訊息
REM -------------------
if "%~x0" == ".bat" goto s1
echo ^</table^> >>%0.htm
echo ^</body^> >>%0.htm
exit

:s1
type %0.htm
echo.
echo 執行完成
echo.
pause
exit
}}}

///%db\createTable.txt
create table t1 (t1key INTEGER PRIMARY KEY,data TEXT,num double,timeEnter DATE);
//%/

///%db\insert.txt
insert into t1 (data,num) values ('This is sample data',3);
insert into t1 (data,num) values ('More sample data',6);
insert into t1 (data,num) values ('中文測試',9);
//%/

///%db\query.txt
select * from t1;
//%/

///%db\delete.txt
delete from t1;
//%/

///%db\dropTable.txt
drop table t1;
//%/

1. mount vm disk image (/var/lib/libvirt/images/*.img)
http://wahlau.5068.info/2010/06/mount-vm-disk-image-varliblibvirtimages.html

2. How to create virtual machines using KVM (Kernel-based Virtual Machine)
http://acidborg.wordpress.com/2010/02/18/how-to-create-virtual-machines-using-kvm-kernel-based-virtual-machine/

3. OVF (Open Virtualization Format) Standard Overview (Part 1)
http://blogs.sun.com/VirtualGuru/entry/ovf_open_virtualization_format_standard






本文網址 : http://www.zdnet.com.tw/news/software/0,2000085678,20145882,00.htm

儘管 Google 的開原碼暨相容性專案經理 Dan Morrill,聲稱 Android 平台的分裂是用來嚇唬菜鳥開發者的虛構故事,這仍是不爭的事實。

Android 的問題有兩個層面。第一,短短一年半的時間,已經有 6 次重大版本發佈。不論你如何努力簡化,開發者、OEM 和顧客,都必須承受許多修改。

第二個問題是,根據 Google 自己的資料,目前仍有 3 個修訂版被廣泛使用:

[img[img/androiduse.png]]

Android 2.1使用率最高,但Android Marketplace的顧客有超過半數使用Android 1.5和1.6,而這個數據即代表該平台的分裂已經存在。

但這構成問題嗎?我認為19個月之內6次重大發佈就是一個問題。那種變動頻率顯示Android的玩家屬性。對一般使用者而言,這種頻繁變動造就出一個令人極端困惑的市場,作業系統版本每幾個月變一次,相容性問題更無法避免。

但這種變動頻率無法長期持續。Android首長Andy Rubin說明了腳步放慢的情況:「現在我們的產品(更新)週期,基本上是1年2次。當一切開始慢慢確定後,最後可能減為1年1次。因為一個變動中的平台,開發者很難追得上。我希望開發者基本上能帶動創新,我不要他們必須去預測創新。」

雖然這種態度對長期發展有利,短期間不會有太明顯的差異。市場分裂的問題不會消失,但藉由謹慎的管理,新版Android不致於讓較早的版本立即被淘汰。

隨著Android逐漸成為主流,該平台也必須給使用者、開發者和OEM更多確定性,淡化其玩家屬性。(陳智文/譯)

http://caas.tmcnet.com/

What is CaaS?
Communications-as-a-Service (CaaS, typically pronounced ‘cass’) is an outsourcing model for enterprise communications. Such communications can include voice over IP, instant messaging, collaboration and videoconference applications using fixed and mobile devices. The CaaS vendor is responsible for all hardware and software management and offers guaranteed Quality of Service. CaaS allows businesses to selectively deploy communications devices and modes on a pay-as-you-go, as-needed basis.

This approach eliminates the large capital investment and ongoing overhead for a system whose capacity may often exceed or fall short of current demand. CaaS offers flexibility and expandability that small and medium-sized businesses might not otherwise afford, allowing for the addition of devices, modes or coverage on demand. The network capacity and feature set can be changed from day to day if necessary so that functionality keeps pace with demand and resources are not wasted. There is no risk of the system becoming obsolete and requiring periodic major upgrades or replacement.
2011-02-15 08:00 作者是 謝良奇/編譯 

由 NASA 和管理服務供應商 Rackspace Hosting 共同開發的開放源碼雲端架構平台 OpenStack,日前獲得來自 Cisco Systems、Canonical 在內等新成員的支持。OpenStack 更宣佈代碼 Bexar 的第二次公開釋出版本,其中包含了對其計算 (Compute) 與物件儲存 (Object Storage) 平台的更新,並首度納入新的虛擬機器映像登錄與遞送服務 Glance。

OpenStack 是一套免費的開放源碼平台,可供服務供應商用以提供基礎架構,類似於 Amazon Web Services 的 EC2 和 S3。該平台主要分為 2 個部份,Nova 是最初由 NASA 所開發的電腦處理服務,Swift 則是 Rackspace 開發的儲存服務元件。

新釋出的 Compute 元件部份增加了對 IPv6、微軟 Hyper-V、iSCSI、Citrix XenAPI、XenServer 快照的支援,並且納入了代號 Glance 的映像找尋和遞送子專案,可達成工作負載在 OpenStack 雲端間的可移性。Bexar 釋出將 Object Storage 的物件由 5GB 擴充至不限大小。

OpenStack 專案此次公佈了 4 個新成員,其中包括 Cisco、Ubuntu Linux 商業贊助者 Canonical、Extreme Networks、Grid Dynamics。隨著新成員的加入,OpenStack 聯盟已達 50 個成員,其中包含了 AMD、Citrix、Dell、Intel。

同時也是 VMware 緊密合作夥伴的 Cisco,其雲端運算部門技術長 Lew Tucker 表示,Cisco 樂於宣佈該公司加入 OpenStack 社群的貢獻成員之列。他認為,網路供應與網路為基礎的服務是雲端運算中的基礎元件,他們期盼與社群一同確保此一開放源碼專案的成功。Rackspace 業務開發副總裁 Mark Collier 指出,Cisco 預期會為該專案貢獻程式碼,以便讓用戶更容易在 OpenStack 環境中設定 Cisco 的交換器。

儘管並非 OpenStack 會員之一,微軟表示其 Hyper-V 虛擬化軟體將可運用於 OpenStack。事實上這次釋出的 Bexar 版本中,就包含了對微軟 Hyper-V hypervisor 的支援,此一貢獻是由 Cloud.com 在微軟協助下所完成。OpenStack 過去已可支援紅帽的 KVM 和 Citrix Systems 的 XenServer。

Collier 表示,OpenStack的設計原本就與 hypervisor 相互獨立。他透露,列表上缺少的 VMware 的 ESX Server,將於今年稍後加入支援。OpenStack 貢獻者已經投入開發工作,其中包括了來自 Citrix Systems 的開發者。儘管 VMware 擁有自己的雲端建構軟體,vCloud,Collier 表示歡迎 VMware 隨時加入 OpenStack 聯盟。

OpenStack 的競爭對手包括 Amazon Web Services 的 EC2、相容於 Amazon Web Services 的開放源碼平台 Eucalyptus、VMware 私有的 vCloud、Nimbula Systems 的開放源碼 Director 雲端作業系統。不久前 Internap 成為 Rackspace 和 NASA 之外,第一家以 OpenStack 為基礎提供服務的公司。其 XIPCloud Storage 服務目前處於 beta 階段。

去年 10 月才推出第一個釋出版「Austin」的 OpenStack,其實還算是個初創的專案。因此 Rackspace 希望來自 Cisco 這類大公司的支持,可以讓企業對於該軟體更具信心。Collier 指出,有關雲端平台的決定影響重大,假如用戶想要採納一套雲端平台,特別是開放源碼平台,支持該平台的公司越可靠,對於該平台的存續用戶越能夠感到放心。

Canonical 主席 Mark Shuttleworth 表示,Canonical 會在預計 4 月推出的 Ubuntu 11.04 中,同時納入以 OpenStack 和 Eucalyptus 為基礎的雲端選項。Rackspace 技術長 Jim Curry 表示,Canonical 正在設法大幅簡化 OpenStack 的環境建置。過去 Canonical 一直是 Eucalyptus 的支持者,並以該平台作為 Ubuntu Enterprise Cloud 的基礎。Shuttleworth 指出,真正重要的是我們開始在雲端的基礎架構層次上,看到些許標準化的影子。Eucalyptus 和 OpenStack 都位於此一過程中的中心。

和 NASA 合作運行其雲端環境的 Anso Labs 共同創辦人 Jesse Andrews 表示,OpenStack 代號 Cactus 的下一釋出版本開發工作已經在進行中,其主要目標之一是為電信與服務供應商的大規模部署,提供足夠的穩定性。


相關網址:
1.Cisco 支持 OpenStack 雲端平台
http://news.yahoo.com/s/pcworld/20110203/tc_pcworld/ciscobacksopenstackcloudplatform
2.Canonical 與 Cisco 歡迎 OpenStack 的 Bexar 釋出
http://www.eweekeurope.co.uk/news/canonical-and-cisco-welcome-openstacks-bexar-releases-20057
3.OpenStack 釋出 Bexar 更新
http://www.v3.co.uk/v3/news/2274593/openstack-bexar-cloud-computing
4.OpenStack 的 Bexar 雲端作業系統增添 VM 映像登錄服務
http://www.sdtimes.com/link/35243
5.OpenStack 'Bexar' 釋出加入跨 Hypervisor 支援
http://www.informationweek.com/news/cloud-computing/infrastructure/showArticle.jhtml?articleID=229200430&subSection=News
6.Canonical 將 Ubuntu 帶向 OpenStack 雲端
http://www.zdnet.com/blog/open-source/canonical-brings-ubuntu-to-the-openstack-cloud/8204
7.OpenStack:只會有一種 Ubuntu 雲端平台
http://www.theregister.co.uk/2011/02/03/openstack_on_ubuntu_partnership/
8.Canonical 加入 OpenStack 社群
http://www.theinquirer.net/inquirer/news/2024215/canonical-joins-openstack-community
9.OpenStack 雲端平台新增 Bexar 釋出和支持者
http://www.zdnet.co.uk/news/cloud/2011/02/04/openstack-cloud-gets-bexar-release-and-backers-40091682/
10.OpenStack 雲端作業系統專案宣佈 Bexar 釋出和新夥伴
http://ostatic.com/blog/canonical-spreads-its-open-cloud-wings-with-openstack
本文網址 : http://news.cnet.com/8301-13846_3-10128773-62.html

Without a doubt, the cloud and all its forms and meanings were big news in 2008. Besides the huge growth of Amazon EC2 and Google App Engine, we saw Salesforce launch Force.com, a true platform-as-a-service.

My picks for the most interesting software of 2008 are Hadoop and Eucalyptus.

Hadoop is an Apache project, the "open source implementation of MapReduce, a powerful tool designed for the detailed analysis and transformation of very large data sets," which basically means you can process a ton of data on commodity hardware.

Hadoop is going commercial through Cloudera and while details are not publicly available, let's just say there are some very important and interesting foundations being laid for the way that people deal with computing and processing power.

Eucalyptus is an "open-source software infrastructure for implementing 'cloud computing' on clusters. The current interface to Eucalyptus is compatible with Amazon's EC2 interface, but the infrastructure is designed to support multiple client-side interfaces. Eucalyptus is implemented using commonly available Linux tools and basic Web-service technologies making it easy to install and maintain."

In layman's terms, Eucalyptus makes a group of Linux boxes act very similar to Amazon EC2, which means someday soon you will be able to run an internal cloud.

The fact that both of these platforms are open source speaks to two aspects of the evolution of the cloud.

* There is an appetite for software that is more bleeding-edge or like an "invention" to be open source as enterprises (and developers) want to be able to tweak it to their needs.
* Fewer and fewer companies are willing to plunk down huge dollar amounts for something that may/may not suit their needs. It will be difficult to justify spending anything in 2009, let alone for software that you haven't proven already.

The cloud will continue to evolve, but these two projects and the respective commercial efforts behind them will be interesting to watch.

Disclosure: I am an advisor to Eucalyptus.
Background: #fff
Foreground: #000
PrimaryPale: #fc8
PrimaryLight: #f81
PrimaryMid: #b40
PrimaryDark: #410
SecondaryPale: #ffc
SecondaryLight: #fe8
SecondaryMid: #db4
SecondaryDark: #841
TertiaryPale: #eee
TertiaryLight: #ccc
TertiaryMid: #999
TertiaryDark: #666
Error: #f88
/***
|''Name:''|CryptoFunctionsPlugin|
|''Description:''|Support for cryptographic functions|
***/
//{{{
if(!version.extensions.CryptoFunctionsPlugin) {
version.extensions.CryptoFunctionsPlugin = {installed:true};

//--
//-- Crypto functions and associated conversion routines
//--

// Crypto "namespace"
function Crypto() {}

// Convert a string to an array of big-endian 32-bit words.
// Only the low 8 bits of each char code are kept (&0xff), so callers must
// pre-encode multi-byte text (e.g. as UTF-8) before hashing it.
Crypto.strToBe32s = function(str)
{
	var be = Array();
	var len = Math.floor(str.length/4);
	var i, j;
	// Pack each complete group of 4 bytes into one big-endian word
	for(i=0, j=0; i<len; i++, j+=4) {
		be[i] = ((str.charCodeAt(j)&0xff) << 24)|((str.charCodeAt(j+1)&0xff) << 16)|((str.charCodeAt(j+2)&0xff) << 8)|(str.charCodeAt(j+3)&0xff);
	}
	// Fold any trailing 1-3 bytes into the final (partial) word
	while (j<str.length) {
		be[j>>2] |= (str.charCodeAt(j)&0xff)<<(24-(j*8)%32);
		j++;
	}
	return be;
};

// Convert an array of big-endian 32-bit words to a string
Crypto.be32sToStr = function(be)
{
	var str = "";
	for(var i=0;i<be.length*32;i+=8)
		str += String.fromCharCode((be[i>>5]>>>(24-i%32)) & 0xff);
	return str;
};

// Convert an array of big-endian 32-bit words to an uppercase hex string
Crypto.be32sToHex = function(be)
{
	var hex = "0123456789ABCDEF";
	var str = "";
	for(var i=0;i<be.length*4;i++)
		str += hex.charAt((be[i>>2]>>((3-i%4)*8+4))&0xF) + hex.charAt((be[i>>2]>>((3-i%4)*8))&0xF);
	return str;
};

// Return, in hex, the SHA-1 hash of a string
Crypto.hexSha1Str = function(str)
{
	return Crypto.be32sToHex(Crypto.sha1Str(str));
};

// Return the SHA-1 hash of a string, as an array of five 32-bit words
Crypto.sha1Str = function(str)
{
	return Crypto.sha1(Crypto.strToBe32s(str),str.length);
};

// Calculate the SHA-1 hash of an array of blen bytes of big-endian 32-bit words.
// NOTE: the input array x is modified in place (padding and bit length are
// appended to it).
Crypto.sha1 = function(x,blen)
{
	// Add 32-bit integers, wrapping at 32 bits.
	// FIX: these three helpers were assigned without 'var', creating implicit
	// globals -- a state leak, and a ReferenceError under strict mode.
	var add32 = function(a,b)
	{
		var lsw = (a&0xFFFF)+(b&0xFFFF);
		var msw = (a>>16)+(b>>16)+(lsw>>16);
		return (msw<<16)|(lsw&0xFFFF);
	};
	// Add five 32-bit integers, wrapping at 32 bits
	var add32x5 = function(a,b,c,d,e)
	{
		var lsw = (a&0xFFFF)+(b&0xFFFF)+(c&0xFFFF)+(d&0xFFFF)+(e&0xFFFF);
		var msw = (a>>16)+(b>>16)+(c>>16)+(d>>16)+(e>>16)+(lsw>>16);
		return (msw<<16)|(lsw&0xFFFF);
	};
	// Bitwise rotate left a 32-bit integer by 1 bit
	var rol32 = function(n)
	{
		return (n>>>31)|(n<<1);
	};

	var len = blen*8;
	// Append padding so length in bits is 448 mod 512
	x[len>>5] |= 0x80 << (24-len%32);
	// Append length
	x[((len+64>>9)<<4)+15] = len;
	var w = Array(80);

	// Round constants (FIPS 180-1)
	var k1 = 0x5A827999;
	var k2 = 0x6ED9EBA1;
	var k3 = 0x8F1BBCDC;
	var k4 = 0xCA62C1D6;

	// Initial hash state (FIPS 180-1)
	var h0 = 0x67452301;
	var h1 = 0xEFCDAB89;
	var h2 = 0x98BADCFE;
	var h3 = 0x10325476;
	var h4 = 0xC3D2E1F0;

	// Process the message in successive 512-bit (16-word) chunks
	for(var i=0;i<x.length;i+=16) {
		var j,t;
		var a = h0;
		var b = h1;
		var c = h2;
		var d = h3;
		var e = h4;
		for(j = 0;j<16;j++) {
			w[j] = x[i+j];
			t = add32x5(e,(a>>>27)|(a<<5),d^(b&(c^d)),w[j],k1);
			e=d; d=c; c=(b>>>2)|(b<<30); b=a; a = t;
		}
		for(j=16;j<20;j++) {
			w[j] = rol32(w[j-3]^w[j-8]^w[j-14]^w[j-16]);
			t = add32x5(e,(a>>>27)|(a<<5),d^(b&(c^d)),w[j],k1);
			e=d; d=c; c=(b>>>2)|(b<<30); b=a; a = t;
		}
		for(j=20;j<40;j++) {
			w[j] = rol32(w[j-3]^w[j-8]^w[j-14]^w[j-16]);
			t = add32x5(e,(a>>>27)|(a<<5),b^c^d,w[j],k2);
			e=d; d=c; c=(b>>>2)|(b<<30); b=a; a = t;
		}
		for(j=40;j<60;j++) {
			w[j] = rol32(w[j-3]^w[j-8]^w[j-14]^w[j-16]);
			t = add32x5(e,(a>>>27)|(a<<5),(b&c)|(d&(b|c)),w[j],k3);
			e=d; d=c; c=(b>>>2)|(b<<30); b=a; a = t;
		}
		for(j=60;j<80;j++) {
			w[j] = rol32(w[j-3]^w[j-8]^w[j-14]^w[j-16]);
			t = add32x5(e,(a>>>27)|(a<<5),b^c^d,w[j],k4);
			e=d; d=c; c=(b>>>2)|(b<<30); b=a; a = t;
		}

		h0 = add32(h0,a);
		h1 = add32(h1,b);
		h2 = add32(h2,c);
		h3 = add32(h3,d);
		h4 = add32(h4,e);
	}
	return Array(h0,h1,h2,h3,h4);
};


}
//}}}
首頁
/***
|''Name:''|DeprecatedFunctionsPlugin|
|''Description:''|Support for deprecated functions removed from core|
***/
//{{{
if(!version.extensions.DeprecatedFunctionsPlugin) {
version.extensions.DeprecatedFunctionsPlugin = {installed:true};

//--
//-- Deprecated code
//--

// @Deprecated: Use createElementAndWikify and this.termRegExp instead
// Wikifies the matched run into a new child element (this.element) of the
// current output node, stopping at this.terminator.
config.formatterHelpers.charFormatHelper = function(w)
{
	w.subWikify(createTiddlyElement(w.output,this.element),this.terminator);
};

// @Deprecated: Use enclosedTextHelper and this.lookaheadRegExp instead
// Renders a monospaced block: re-scans from the match start with
// this.lookahead and, when it matches at exactly that position, emits the
// first capture group inside a <pre> element and advances the parse cursor.
config.formatterHelpers.monospacedByLineHelper = function(w)
{
	var lookaheadRegExp = new RegExp(this.lookahead,"mg");
	lookaheadRegExp.lastIndex = w.matchStart;
	var lookaheadMatch = lookaheadRegExp.exec(w.source);
	if(lookaheadMatch && lookaheadMatch.index == w.matchStart) {
		var text = lookaheadMatch[1];
		// NOTE(review): presumably IE needs \r line endings inside <pre> -- verify
		if(config.browser.isIE)
			text = text.replace(/\n/g,"\r");
		createTiddlyElement(w.output,"pre",null,null,text);
		w.nextMatch = lookaheadRegExp.lastIndex;
	}
};

// @Deprecated: Use <br> or <br /> instead of <<br>>
// Macro that emits a single <br> element into the output.
config.macros.br = {};
config.macros.br.handler = function(place)
{
	createTiddlyElement(place,"br");
};

// Locate an entry in an array using indexOf (strict-equality) semantics.
// Returns the zero-based index of the first match, or null when absent.
// @Deprecated: Use indexOf instead
Array.prototype.find = function(item)
{
	var idx = this.indexOf(item);
	if (idx == -1)
		return null;
	return idx;
};

// Load a tiddler from an HTML DIV. The caller should make sure to later call Tiddler.changed()
// @Deprecated: Use store.getLoader().internalizeTiddler instead
// Thin delegation wrapper kept for backward compatibility.
Tiddler.prototype.loadFromDiv = function(divRef,title)
{
	return store.getLoader().internalizeTiddler(store,this,title,divRef);
};

// Format the text for storage in an HTML DIV
// @Deprecated Use store.getSaver().externalizeTiddler instead.
// Thin delegation wrapper kept for backward compatibility.
Tiddler.prototype.saveToDiv = function()
{
	return store.getSaver().externalizeTiddler(store,this);
};

// Render all tiddlers in the store as HTML (delegates to the store).
// @Deprecated: Use store.allTiddlersAsHtml() instead
function allTiddlersAsHtml()
{
	return store.allTiddlersAsHtml();
}

// @Deprecated: Use refreshPageTemplate instead
// Thin wrapper kept for backward compatibility.
function applyPageTemplate(title)
{
	refreshPageTemplate(title);
}

// @Deprecated: Use story.displayTiddlers instead
// Accepts the old 7-argument signature; the unused1/unused2/unused3
// parameters are ignored.
function displayTiddlers(srcElement,titles,template,unused1,unused2,animate,unused3)
{
	story.displayTiddlers(srcElement,titles,template,animate);
}

// @Deprecated: Use story.displayTiddler instead
// Accepts the old 7-argument signature; the unused1/unused2/unused3
// parameters are ignored.
function displayTiddler(srcElement,title,template,unused1,unused2,animate,unused3)
{
	story.displayTiddler(srcElement,title,template,animate);
}

// @Deprecated: Use functions on right hand side directly instead
// Legacy aliases for the Popup API.
var createTiddlerPopup = Popup.create;
var scrollToTiddlerPopup = Popup.show;
var hideTiddlerPopup = Popup.remove;

// @Deprecated: Use right hand side directly instead
// Pre-built regular expressions once used by core text processing.
var regexpBackSlashEn = new RegExp("\\\\n","mg");
var regexpBackSlash = new RegExp("\\\\","mg");
var regexpBackSlashEss = new RegExp("\\\\s","mg");
var regexpNewLine = new RegExp("\n","mg");
var regexpCarriageReturn = new RegExp("\r","mg");

}
//}}}
/***
|Name|DisableWikiLinksPlugin|
|Source|http://www.TiddlyTools.com/#DisableWikiLinksPlugin|
|Version|1.5.0|
|Author|Eric Shulman - ELS Design Studios|
|License|http://www.TiddlyTools.com/#LegalStatements <br>and [[Creative Commons Attribution-ShareAlike 2.5 License|http://creativecommons.org/licenses/by-sa/2.5/]]|
|~CoreVersion|2.1|
|Type|plugin|
|Requires||
|Overrides|Tiddler.prototype.autoLinkWikiWords, 'wikiLink' formatter|
|Options|##Configuration|
|Description|selectively disable TiddlyWiki's automatic ~WikiWord linking behavior|
This plugin allows you to disable TiddlyWiki's automatic ~WikiWord linking behavior, so that WikiWords embedded in tiddler content will be rendered as regular text, instead of being automatically converted to tiddler links.  To create a tiddler link when automatic linking is disabled, you must enclose the link text within {{{[[...]]}}}.
!!!!!Usage
<<<
You can block automatic WikiWord linking behavior for any specific tiddler by ''tagging it with<<tag excludeWikiWords>>'' (see configuration below) or, check a plugin option to disable automatic WikiWord links to non-existing tiddler titles, while still linking WikiWords that correspond to existing tiddlers titles or shadow tiddler titles.  You can also block specific selected WikiWords from being automatically linked by listing them in [[DisableWikiLinksList]] (see configuration below), separated by whitespace.  This tiddler is optional and, when present, causes the listed words to always be excluded, even if automatic linking of other WikiWords is being permitted.  

Note: WikiWords contained in default ''shadow'' tiddlers will be automatically linked unless you select an additional checkbox option that disables these automatic links as well, though this is not recommended, since it can make it more difficult to access some TiddlyWiki standard default content (such as AdvancedOptions or SideBarTabs)
<<<
!!!!!Configuration
<<<
<<option chkDisableWikiLinks>> Disable ALL automatic WikiWord tiddler links
<<option chkAllowLinksFromShadowTiddlers>> ... except for WikiWords //contained in// shadow tiddlers
<<option chkDisableNonExistingWikiLinks>> Disable automatic WikiWord links for non-existing tiddlers
Disable automatic WikiWord links for words listed in: <<option txtDisableWikiLinksList>>
Disable automatic WikiWord links for tiddlers tagged with: <<option txtDisableWikiLinksTag>>
<<<
!!!!!Code
***/
//{{{
// Plugin version stamp
version.extensions.disableWikiLinks= {major: 1, minor: 5, revision: 0, date: new Date(2007,6,9)};

// Initialize configuration options with defaults, but only when not already
// set (e.g. by the user's saved options cookie).
if (config.options.chkDisableNonExistingWikiLinks==undefined) config.options.chkDisableNonExistingWikiLinks= false;
if (config.options.chkDisableWikiLinks==undefined) config.options.chkDisableWikiLinks=false;
if (config.options.txtDisableWikiLinksList==undefined) config.options.txtDisableWikiLinksList="DisableWikiLinksList";
if (config.options.chkAllowLinksFromShadowTiddlers==undefined) config.options.chkAllowLinksFromShadowTiddlers=true;
if (config.options.txtDisableWikiLinksTag==undefined) config.options.txtDisableWikiLinksTag="excludeWikiWords";

// find the formatter for wikiLink and replace handler with 'pass-thru' rendering
initDisableWikiLinksFormatter();
function initDisableWikiLinksFormatter() {
	// Locate the core 'wikiLink' formatter entry.
	var i;
	for (i=0; i<config.formatters.length && config.formatters[i].name!="wikiLink"; i++);
	// FIX: guard against the formatter being absent -- previously
	// config.formatters[i] would be undefined here and the assignments
	// below would throw.
	if (i>=config.formatters.length) return;
	// Keep the original handler so the wrapper can still delegate to it.
	config.formatters[i].coreHandler=config.formatters[i].handler;
	config.formatters[i].handler=function(w) {
		// suppress any leading "~" (if present)
		var skip=(w.matchText.substr(0,1)==config.textPrimitives.unWikiLink)?1:0;
		var title=w.matchText.substr(skip);
		var exists=store.tiddlerExists(title);
		var inShadow=w.tiddler && store.isShadowTiddler(w.tiddler.title);

		// check for excluded Tiddler
		if (w.tiddler && w.tiddler.isTagged(config.options.txtDisableWikiLinksTag))
			{ w.outputText(w.output,w.matchStart+skip,w.nextMatch); return; }

		// check for specific excluded wiki words
		var t=store.getTiddlerText(config.options.txtDisableWikiLinksList);
		if (t && t.length && t.indexOf(w.matchText)!=-1)
			{ w.outputText(w.output,w.matchStart+skip,w.nextMatch); return; }

		// if not disabling links from shadows (default setting)
		if (config.options.chkAllowLinksFromShadowTiddlers && inShadow)
			return this.coreHandler(w);

		// check for non-existing non-shadow tiddler
		if (config.options.chkDisableNonExistingWikiLinks && !exists)
			{ w.outputText(w.output,w.matchStart+skip,w.nextMatch); return; }

		// if not enabled, just do standard WikiWord link formatting
		if (!config.options.chkDisableWikiLinks)
			return this.coreHandler(w);

		// just return text without linking
		w.outputText(w.output,w.matchStart+skip,w.nextMatch);
	};
}

// Preserve the core implementation, then wrap it so automatic WikiWord
// linking can be switched off via the chkDisableWikiLinks option.
Tiddler.prototype.coreAutoLinkWikiWords = Tiddler.prototype.autoLinkWikiWords;
Tiddler.prototype.autoLinkWikiWords = function()
{
	// When automatic links are globally disabled, report "no links found";
	// otherwise defer to the saved core behaviour.
	if (config.options.chkDisableWikiLinks)
		return false;
	return this.coreAutoLinkWikiWords.apply(this,arguments);
}
//}}}
破壞性創新(Disruptive Innovation)這個新詞是由1997年克雷頓、克里斯汀生所提出。他的理論讓商業世界對創新的定義重新興起另一波不同的見解。也因此改變了企業之間競爭的模式,意者不見強者更強,新進者永遠處於弱勢的狀態。

''創新的定義''
企業界對創新原本的定義,大部份覺得應該是更好的產品、更好的技術、更快的流程。依據傳統性的做法,企業在原有產品的發展下,只能維持漸進式(Incremental)的創新(意即讓產品不斷改善及更新)或者是激進式(Radical)的創新(發展不同的技術),這樣的發展結果,往往會把企業「推」進高階市場、限制企業往低階轉進。分析其主要原因,主要在於高階和低階市場的「規模」和「成本結構」有所不同。

''破壞性創新的定義''
但由克雷頓、克里斯汀生提出的破壞性創新(Disruptive Innovation)所詮譯新的定義則是更差的功能或是只比「沒有」還好一點的產品。這個定義簡單說明企業在競爭市場中,如果以破壞性創新開拓市場,最好把眼光瞄準低階市場,或尚未發現的新市場。因此;與上述的定義結果確實有很大的落差。

破壞性創新握有兩大特色:
{{{
1. 產品某些特色吸引了某些非目標的顧客(有可能是未開發的顧客或目標外的顧客)

2. 比原有產品便宜、使用者易用、便利。
}}}

例如:液晶電視在剛開始推出的時後,在消費者的心中實屬於高價品,也非當時主流的電視產品;所有人仍然繼續使用傳統CRT。但慢慢地,隨著科技發展,進步到符合消費者需要的性能時,甚至是更低價時,液晶電視就會開始大量取代原有的傳統CRT,甚至是讓傳統CRT重此消失蹤跡。

由無法商業化的產品,經由延續性的創新,漸漸符合低階使用者的需求,開始擁有小眾市場,到符合高階使用者需求,取代原有產品,成為主流產品,這將是破壞性創新最新的理論概念。也是顛覆商業市場的另一個開始。
/*{{{*/
 
/*}}}*/
/*{{{*/
a {color:#0044BB;font-weight:bold}
/*}}}*/
<!--{{{-->
<div class='toolbar' macro='toolbar +saveTiddler -cancelTiddler deleteTiddler'></div>
<div class='title' macro='view title'></div>
<div class='editor' macro='edit title'></div>
<div macro='annotations'></div>
<div class='editor' macro='easyEdit text'></div>
<div class='editor' macro='edit tags'></div><div class='editorFooter'><span macro='message views.editor.tagPrompt'></span><span macro='tagChooser'></span></div>
<!--}}}-->
<!--{{{-->
<div class='toolbar' macro='toolbar +saveTiddler -cancelTiddler deleteTiddler'></div>
<div class='title' macro='view title'></div>
<div class='editor' macro='edit title'></div>
<div macro='annotations'></div>
<div class='editor' macro='edit text'></div>
<div class='editor' macro='edit tags'></div><div class='editorFooter'><span macro='message views.editor.tagPrompt'></span><span macro='tagChooser'></span></div>
<!--}}}-->
/***
|Name|ExportTiddlersPlugin|
|Source|http://www.TiddlyTools.com/#ExportTiddlersPlugin|
|Documentation|http://www.TiddlyTools.com/#ExportTiddlersPluginInfo|
|Version|2.7.0|
|Author|Eric Shulman - ELS Design Studios|
|License|http://www.TiddlyTools.com/#LegalStatements <br>and [[Creative Commons Attribution-ShareAlike 2.5 License|http://creativecommons.org/licenses/by-sa/2.5/]]|
|~CoreVersion|2.1|
|Type|plugin|
|Requires||
|Overrides||
|Description|select and extract tiddlers from your ~TiddlyWiki documents and save them to a separate file|
ExportTiddlersPlugin lets you select and extract tiddlers from your ~TiddlyWiki documents using an interactive control panel that lets you specify a destination, and then select which tiddlers to export. Tiddler data can be output as complete, stand-alone TiddlyWiki documents, or just the selected tiddlers ("~PureStore" format -- smaller files!) that can be imported directly into another ~TiddlyWiki, or as an ~RSS-compatible XML file that can be published for RSS syndication.
!!!!!Documentation
>see [[ExportTiddlersPluginInfo]]
!!!!!Code
***/
//{{{
// version
version.extensions.exportTiddlers = {major: 2, minor: 7, revision: 0, date: new Date(2008,5,27)};

// default shadow definition
config.shadowTiddlers.ExportTiddlers="<<exportTiddlers inline>>";

// add 'export' backstage task (following built-in import task)
if (config.tasks) { // TW2.2 or above
	config.tasks.exportTask = {
		text:"export",
		tooltip:"Export selected tiddlers to another file",
		content:"<<exportTiddlers inline>>"
	}
	// insert immediately after the built-in "importTask" entry
	config.backstageTasks.splice(config.backstageTasks.indexOf("importTask")+1,0,"exportTask");
}

// macro handler
// label/prompt: UI text for the popup button
// newdefault: default export filename appended to the document's folder
// datetimefmt: format used by the "filter date/time" edit fields
config.macros.exportTiddlers = {
	label: "export tiddlers",
	prompt: "Copy selected tiddlers to an export document",
	newdefault: "export.html",
	datetimefmt: "0MM/0DD/YYYY 0hh:0mm:0ss" // for "filter date/time" edit fields
};

// Macro entry point. With the "inline" parameter the export panel is
// embedded directly (static, always visible); otherwise a button is
// created that pops the panel up on demand.
config.macros.exportTiddlers.handler = function(place,macroName,params) {
	if (params[0]!="inline")
		{ createTiddlyButton(place,this.label,this.prompt,onClickExportMenu); return; }
	var panel=createExportPanel(place);
	panel.style.position="static";
	panel.style.display="block";
}

// Build (or rebuild) the export control panel inside 'place'. Any existing
// panel is removed first so only one instance ever exists in the document.
function createExportPanel(place) {
	var panel=document.getElementById("exportPanel");
	if (panel) { panel.parentNode.removeChild(panel); }
	setStylesheet(config.macros.exportTiddlers.css,"exportTiddlers");
	panel=createTiddlyElement(place,"span","exportPanel",null,null)
	panel.innerHTML=config.macros.exportTiddlers.html;
	exportInitFilter();
	refreshExportList(0);
	var fn=document.getElementById("exportFilename");
	// For locally-opened (file:) documents with no filename yet, pre-fill
	// the output path: current document's folder + default export name.
	if (window.location.protocol=="file:" && !fn.value.length) {
		// get new target path/filename
		var newPath=getLocalPath(window.location.href);
		// strip the document's own filename, handling both / and \ separators
		var slashpos=newPath.lastIndexOf("/"); if (slashpos==-1) slashpos=newPath.lastIndexOf("\\"); 
		if (slashpos!=-1) newPath=newPath.substr(0,slashpos+1); // trim filename
		fn.value=newPath+config.macros.exportTiddlers.newdefault;
	}
	return panel;
}

// Toggle the floating export panel open/closed in response to a click on
// the "export tiddlers" button. Refreshes the tiddler list and focuses the
// filename field whenever the panel becomes visible.
function onClickExportMenu(e)
{
	// Fall back to the legacy IE event object when none is passed.
	e = e || window.event;
	var container = resolveTarget(e).parentNode;
	var panel = document.getElementById("exportPanel");
	// (Re)create the panel when it does not exist or lives under another parent.
	if (panel == undefined || panel.parentNode != container)
		panel = createExportPanel(container);
	var wasOpen = (panel.style.display == "block");
	if (config.options.chkAnimate) {
		anim.startAnimating(new Slider(panel, !wasOpen, e.shiftKey || e.altKey, "none"));
	} else {
		panel.style.display = wasOpen ? "none" : "block" ;
	}
	// update list and set focus when panel is made visible
	if (panel.style.display != "none") {
		var fname = document.getElementById("exportFilename");
		refreshExportList(0);
		fname.focus();
		fname.select();
	}
	e.cancelBubble = true;
	if (e.stopPropagation) e.stopPropagation();
	return(false);
}
//}}}

// // IE needs explicit scoping for functions called by browser events
//{{{
// NOTE(review): onClickExportButton, exportShowFilterFields and
// refreshExportList are presumably defined later in this plugin -- verify.
window.onClickExportMenu=onClickExportMenu;
window.onClickExportButton=onClickExportButton;
window.exportShowFilterFields=exportShowFilterFields;
window.refreshExportList=refreshExportList;
//}}}

// // CSS for floating export control panel
//{{{
// One long string literal (backslash line-continuations); injected via
// setStylesheet() by createExportPanel.
config.macros.exportTiddlers.css = '\
#exportPanel {\
	display: none; position:absolute; z-index:12; width:45em; right:105%; top:6em;\
	background-color: #eee; color:#000; font-size: 10pt; line-height:110%;\
	border:1px solid black; border-bottom-width: 3px; border-right-width: 3px;\
	padding: 0.5em; margin:0em; -moz-border-radius:1em;\
}\
#exportPanel a, #exportPanel td a { color:#009; display:inline; margin:0px; padding:1px; }\
#exportPanel table { width:100%; border:0px; padding:0px; margin:0px; font-size:10pt; line-height:110%; background:transparent; }\
#exportPanel tr { border:0px;padding:0px;margin:0px; background:transparent; }\
#exportPanel td { color:#000; border:0px;padding:0px;margin:0px; background:transparent; }\
#exportPanel select { width:98%;margin:0px;font-size:10pt;line-height:110%;}\
#exportPanel input  { width:98%;padding:0px;margin:0px;font-size:10pt;line-height:110%; }\
#exportPanel textarea  { width:98%;padding:0px;margin:0px;overflow:auto;font-size:10pt; }\
#exportPanel .box { border:1px solid black; padding:3px; margin-bottom:5px; background:#f8f8f8; -moz-border-radius:5px; }\
#exportPanel .topline { border-top:2px solid black; padding-top:3px; margin-bottom:5px; }\
#exportPanel .rad { width:auto;border:0 }\
#exportPanel .chk { width:auto;border:0 }\
#exportPanel .btn { width:auto; }\
#exportPanel .btn1 { width:98%; }\
#exportPanel .btn2 { width:48%; }\
#exportPanel .btn3 { width:32%; }\
#exportPanel .btn4 { width:24%; }\
#exportPanel .btn5 { width:19%; }\
';
//}}}

// // HTML for export control panel interface
//{{{
// One long string literal (backslash line-continuations) assigned to
// panel.innerHTML by createExportPanel. Element ids are looked up by the
// export functions; inline event handlers call the window.* entry points.
config.macros.exportTiddlers.html = '\
<!-- target path/file  -->\
<div>\
export to path/filename:<br>\
<input type="text" id="exportFilename" size=40 style="width:93%"><input \
	type="button" id="exportBrowse" value="..." title="select or enter a local folder/file..." style="width:5%" \
	onclick="var fn=window.promptForExportFilename(this); if (fn.length) this.previousSibling.value=fn; ">\
</div>\
\
<!-- output format -->\
<div>\
output file format:\
<select id="exportFormat" size=1>\
<option value="TW">TiddlyWiki document (includes core code)</option>\
<option value="DIV">TiddlyWiki "PureStore" file (tiddler data only)</option>\
<option value="XML">XML (for RSS newsfeed)</option>\
</select>\
</div>\
\
<!-- notes -->\
<div>\
notes:<br>\
<textarea id="exportNotes" rows=3 cols=40 style="height:4em;margin-bottom:5px;" onfocus="this.select()"></textarea> \
</div>\
\
<!-- list of tiddlers -->\
<table><tr align="left"><td>\
	select:\
	<a href="JavaScript:;" id="exportSelectAll"\
		onclick="onClickExportButton(this)" title="select all tiddlers">\
		&nbsp;all&nbsp;</a>\
	<a href="JavaScript:;" id="exportSelectChanges"\
		onclick="onClickExportButton(this)" title="select tiddlers changed since last save">\
		&nbsp;changes&nbsp;</a> \
	<a href="JavaScript:;" id="exportSelectOpened"\
		onclick="onClickExportButton(this)" title="select tiddlers currently being displayed">\
		&nbsp;opened&nbsp;</a> \
	<a href="JavaScript:;" id="exportSelectRelated"\
		onclick="onClickExportButton(this)" title="select all tiddlers related (by link or transclusion) to the currently selected tiddlers">\
		&nbsp;related&nbsp;</a> \
	<a href="JavaScript:;" id="exportToggleFilter"\
		onclick="onClickExportButton(this)" title="show/hide selection filter">\
		&nbsp;filter&nbsp;</a>  \
</td><td align="right">\
	<a href="JavaScript:;" id="exportListSmaller"\
		onclick="onClickExportButton(this)" title="reduce list size">\
		&nbsp;&#150;&nbsp;</a>\
	<a href="JavaScript:;" id="exportListLarger"\
		onclick="onClickExportButton(this)" title="increase list size">\
		&nbsp;+&nbsp;</a>\
</td></tr></table>\
<select id="exportList" multiple size="20" style="margin-bottom:5px;"\
	onchange="refreshExportList(this.selectedIndex)">\
</select><br>\
</div><!--box-->\
\
<!-- selection filter -->\
<div id="exportFilterPanel" style="display:none">\
<table><tr align="left"><td>\
	selection filter\
</td><td align="right">\
	<a href="JavaScript:;" id="exportHideFilter"\
		onclick="onClickExportButton(this)" title="hide selection filter">hide</a>\
</td></tr></table>\
<div class="box">\
<input type="checkbox" class="chk" id="exportFilterStart" value="1"\
	onclick="exportShowFilterFields(this)"> starting date/time<br>\
<table cellpadding="0" cellspacing="0"><tr valign="center"><td width="50%">\
	<select size=1 id="exportFilterStartBy" onchange="exportShowFilterFields(this);">\
		<option value="0">today</option>\
		<option value="1">yesterday</option>\
		<option value="7">a week ago</option>\
		<option value="30">a month ago</option>\
		<option value="site">SiteDate</option>\
		<option value="file">file date</option>\
		<option value="other">other (mm/dd/yyyy hh:mm)</option>\
	</select>\
</td><td width="50%">\
	<input type="text" id="exportStartDate" onfocus="this.select()"\
		onchange="document.getElementById(\'exportFilterStartBy\').value=\'other\';">\
</td></tr></table>\
<input type="checkbox" class="chk" id="exportFilterEnd" value="1"\
	onclick="exportShowFilterFields(this)"> ending date/time<br>\
<table cellpadding="0" cellspacing="0"><tr valign="center"><td width="50%">\
	<select size=1 id="exportFilterEndBy" onchange="exportShowFilterFields(this);">\
		<option value="0">today</option>\
		<option value="1">yesterday</option>\
		<option value="7">a week ago</option>\
		<option value="30">a month ago</option>\
		<option value="site">SiteDate</option>\
		<option value="file">file date</option>\
		<option value="other">other (mm/dd/yyyy hh:mm)</option>\
	</select>\
</td><td width="50%">\
	<input type="text" id="exportEndDate" onfocus="this.select()"\
		onchange="document.getElementById(\'exportFilterEndBy\').value=\'other\';">\
</td></tr></table>\
<input type="checkbox" class="chk" id=exportFilterTags value="1"\
	onclick="exportShowFilterFields(this)"> match tags<br>\
<input type="text" id="exportTags" onfocus="this.select()">\
<input type="checkbox" class="chk" id=exportFilterText value="1"\
	onclick="exportShowFilterFields(this)"> match titles/tiddler text<br>\
<input type="text" id="exportText" onfocus="this.select()">\
</div> <!--box-->\
</div> <!--panel-->\
\
<!-- action buttons -->\
<div style="text-align:center">\
<input type=button class="btn4" onclick="onClickExportButton(this)"\
	id="exportFilter" value="apply filter">\
<input type=button class="btn4" onclick="onClickExportButton(this)"\
	id="exportStart" value="export tiddlers">\
<input type=button class="btn4" onclick="onClickExportButton(this)"\
	id="exportDelete" value="delete tiddlers">\
<input type=button class="btn4" onclick="onClickExportButton(this)"\
	id="exportClose" value="close">\
</div><!--center-->\
';
//}}}

// // initialize interface

// // exportInitFilter()
//{{{
function exportInitFilter() {
	// Reset all four export filters (start date, end date, tags, text) to
	// their defaults: checkbox unchecked, associated input field cleared.
	var checkboxIds=["exportFilterStart","exportFilterEnd","exportFilterTags","exportFilterText"];
	var fieldIds=["exportStartDate","exportEndDate","exportTags","exportText"];
	for (var i=0; i<checkboxIds.length; i++) {
		document.getElementById(checkboxIds[i]).checked=false;
		document.getElementById(fieldIds[i]).value="";
	}
	// sync visibility of the filter input fields with the (now unchecked) boxes
	exportShowFilterFields();
}
//}}}

// // exportShowFilterFields(which)
//{{{
// Show/hide each filter's input controls based on its checkbox state, and
// refresh the displayed date values for the start/end date filters.
// 'which' (optional) is the control that triggered the update: when it is a
// start/end "by" selector set to 'other', focus jumps to the free-text date.
function exportShowFilterFields(which) {
	var show;

	// start date filter: toggle selector+input, recompute displayed date
	show=document.getElementById('exportFilterStart').checked;
	document.getElementById('exportFilterStartBy').style.display=show?"block":"none";
	document.getElementById('exportStartDate').style.display=show?"block":"none";
	var val=document.getElementById('exportFilterStartBy').value;
	document.getElementById('exportStartDate').value
		=getFilterDate(val,'exportStartDate').formatString(config.macros.exportTiddlers.datetimefmt);
	 if (which && (which.id=='exportFilterStartBy') && (val=='other'))
		document.getElementById('exportStartDate').focus();

	// end date filter: same treatment as the start date filter
	show=document.getElementById('exportFilterEnd').checked;
	document.getElementById('exportFilterEndBy').style.display=show?"block":"none";
	document.getElementById('exportEndDate').style.display=show?"block":"none";
	var val=document.getElementById('exportFilterEndBy').value;
	document.getElementById('exportEndDate').value
		=getFilterDate(val,'exportEndDate').formatString(config.macros.exportTiddlers.datetimefmt);
	 if (which && (which.id=='exportFilterEndBy') && (val=='other'))
		document.getElementById('exportEndDate').focus();

	// tags filter: only the text input toggles
	show=document.getElementById('exportFilterTags').checked;
	document.getElementById('exportTags').style.display=show?"block":"none";

	// title/text filter: only the text input toggles
	show=document.getElementById('exportFilterText').checked;
	document.getElementById('exportText').style.display=show?"block":"none";
}
//}}}

// // onClickExportButton(which): control interactions
//{{{
// Dispatch handler for every button/control in the export panel, keyed by the
// clicked element's id. Updates the selection list, the status message, and
// the enabled state of the "export"/"delete" buttons.
function onClickExportButton(which)
{
	// DEBUG alert(which.id);
	var theList=document.getElementById('exportList'); if (!theList) return;
	var count = 0;
	var total = store.getTiddlers('title').length;
	switch (which.id)
		{
		case 'exportFilter':
			// apply the filter-panel criteria; -1 means "no/invalid filter set"
			count=filterExportList();
			var panel=document.getElementById('exportFilterPanel');
			if (count==-1) { panel.style.display='block'; break; }
			document.getElementById("exportStart").disabled=(count==0);
			document.getElementById("exportDelete").disabled=(count==0);
			clearMessage(); displayMessage("filtered "+formatExportMessage(count,total));
			if (count==0) { alert("No tiddlers were selected"); panel.style.display='block'; }
			break;
		case 'exportStart':
			exportTiddlers();
			break;
		case 'exportDelete':
			exportDeleteTiddlers();
			break;
		case 'exportHideFilter':
		case 'exportToggleFilter':
			// toggle visibility of the filter criteria panel
			var panel=document.getElementById('exportFilterPanel')
			panel.style.display=(panel.style.display=='block')?'none':'block';
			break;
		case 'exportSelectChanges':
			// select tiddlers modified more recently than the document file
			var lastmod=new Date(document.lastModified);
			for (var t = 0; t < theList.options.length; t++) {
				if (theList.options[t].value=="") continue; // skip section headings
				var tiddler=store.getTiddler(theList.options[t].value); if (!tiddler) continue;
				theList.options[t].selected=(tiddler.modified>lastmod);
				count += (tiddler.modified>lastmod)?1:0;
			}
			document.getElementById("exportStart").disabled=(count==0);
			document.getElementById("exportDelete").disabled=(count==0);
			clearMessage(); displayMessage(formatExportMessage(count,total));
			if (count==0) alert("There are no unsaved changes");
			break;
		case 'exportSelectAll':
			// select every real tiddler entry (heading rows have value=="")
			for (var t = 0; t < theList.options.length; t++) {
				if (theList.options[t].value=="") continue;
				theList.options[t].selected=true;
				count += 1;
			}
			document.getElementById("exportStart").disabled=(count==0);
			document.getElementById("exportDelete").disabled=(count==0);
			clearMessage(); displayMessage(formatExportMessage(count,count));
			break;
		case 'exportSelectOpened':
			// select only tiddlers currently displayed in the story column
			for (var t = 0; t < theList.options.length; t++) theList.options[t].selected=false;
			var tiddlerDisplay = document.getElementById("tiddlerDisplay"); // for TW2.1-
			if (!tiddlerDisplay) tiddlerDisplay = document.getElementById("storyDisplay"); // for TW2.2+
			for (var t=0;t<tiddlerDisplay.childNodes.length;t++) {
				var tiddler=tiddlerDisplay.childNodes[t].id.substr(7); // drop 7-char DOM id prefix to recover the title
				for (var i = 0; i < theList.options.length; i++) {
					if (theList.options[i].value!=tiddler) continue;
					theList.options[i].selected=true; count++; break;
				}
			}
			document.getElementById("exportStart").disabled=(count==0);
			document.getElementById("exportDelete").disabled=(count==0);
			clearMessage(); displayMessage(formatExportMessage(count,total));
			if (count==0) alert("There are no tiddlers currently opened");
			break;
		case 'exportSelectRelated':
			// recursively build list of related tiddlers
			function getRelatedTiddlers(tid,tids) {
				var t=store.getTiddler(tid); if (!t || tids.contains(tid)) return tids;
				tids.push(t.title);
				if (!t.linksUpdated) t.changed(); // ensure t.links is current before walking it
				for (var i=0; i<t.links.length; i++)
					if (t.links[i]!=tid) tids=getRelatedTiddlers(t.links[i],tids);
				return tids;
			}
			// for all currently selected tiddlers, gather up the related tiddlers (including self) and select them as well
			var tids=[];
			for (var i=0; i<theList.options.length; i++)
				if (theList.options[i].selected) tids=getRelatedTiddlers(theList.options[i].value,tids);
			// select related tiddlers (includes original selected tiddlers)
			for (var i=0; i<theList.options.length; i++)
				theList.options[i].selected=tids.contains(theList.options[i].value);
			clearMessage(); displayMessage(formatExportMessage(tids.length,total));
			break;
		case 'exportListSmaller':	// decrease current listbox size
			var min=5;
			theList.size-=(theList.size>min)?1:0;
			break;
		case 'exportListLarger':	// increase current listbox size
			var max=(theList.options.length>25)?theList.options.length:25;
			theList.size+=(theList.size<max)?1:0;
			break;
		case 'exportClose':
			document.getElementById('exportPanel').style.display='none';
			break;
		}
}
//}}}

// // promptForFilename(msg,path,file) uses platform/browser specific functions to get local filespec
//{{{
// Ask the user for a local export path/filename using the best available
// platform mechanism: Mozilla XPCOM filepicker, IE's UserAccounts.CommonDialog,
// or a plain prompt() as the final fallback. 'here' is the invoking element;
// its tooltip (title attribute) supplies the dialog message. Returns the
// chosen filespec, or "" when nothing was selected.
window.promptForExportFilename=function(here)
{
	var msg=here.title; // use tooltip as dialog box message
	var path=getLocalPath(document.location.href);
	// strip the document filename, handling both / and \ separators
	var slashpos=path.lastIndexOf("/"); if (slashpos==-1) slashpos=path.lastIndexOf("\\"); 
	if (slashpos!=-1) path = path.substr(0,slashpos+1); // remove filename from path, leave the trailing slash
	var file=config.macros.exportTiddlers.newdefault;
	var result="";
	if(window.Components) { // moz
		try {
			// requires elevated privilege to touch the local filesystem
			netscape.security.PrivilegeManager.enablePrivilege('UniversalXPConnect');
			var nsIFilePicker = window.Components.interfaces.nsIFilePicker;
			var picker = Components.classes['@mozilla.org/filepicker;1'].createInstance(nsIFilePicker);
			picker.init(window, msg, nsIFilePicker.modeSave);
			var thispath = Components.classes['@mozilla.org/file/local;1'].createInstance(Components.interfaces.nsILocalFile);
			thispath.initWithPath(path);
			picker.displayDirectory=thispath;
			picker.defaultExtension='html';
			picker.defaultString=file;
			picker.appendFilters(nsIFilePicker.filterAll|nsIFilePicker.filterText|nsIFilePicker.filterHTML);
			if (picker.show()!=nsIFilePicker.returnCancel) var result=picker.file.persistentDescriptor;
		}
		catch(e) { alert('error during local file access: '+e.toString()) }
	}
	else { // IE
		try { // XPSP2 IE only
			var s = new ActiveXObject('UserAccounts.CommonDialog');
			s.Filter='All files|*.*|Text files|*.txt|HTML files|*.htm;*.html|';
			s.FilterIndex=3; // default to HTML files;
			s.InitialDir=path;
			s.FileName=file;
			if (s.showOpen()) var result=s.FileName;
		}
		catch(e) {  // fallback
			var result=prompt(msg,path+file);
		}
	}
	return result;
}

// // list display
//{{{
// Build a status line such as "5 tiddlers - 3 selected for export".
// 'count' of 0 renders as "none"; count equal to 'total' renders as "all".
function formatExportMessage(count,total)
{
	var plural=(total!=1)?'s':'';
	var selected;
	if (count==0) selected="none";
	else if (count==total) selected="all";
	else selected=count;
	return total+' tiddler'+plural+" - "+selected+" selected for export";
}

// (Re)build and refresh the export listbox. 'selectedIndex' is the list row
// that triggered the refresh: rows 0-4 are heading/sort-control rows
// ("N tiddlers", [by title], [by date], [by author], [by tags]); any higher
// index is a tiddler/section row and only selection counting is performed.
function refreshExportList(selectedIndex)
{
	var theList  = document.getElementById("exportList");
	var sort;
	if (!theList) return;
	// get the sort order
	// note: index 0 (the "N tiddlers" heading) defaults to date order, same as index 2
	if (!selectedIndex)   selectedIndex=0;
	if (selectedIndex==0) sort='modified';
	if (selectedIndex==1) sort='title';
	if (selectedIndex==2) sort='modified';
	if (selectedIndex==3) sort='modifier';
	if (selectedIndex==4) sort='tags';

	// unselect headings and count number of tiddlers actually selected
	var count=0;
	for (var t=5; t < theList.options.length; t++) {
		if (!theList.options[t].selected) continue;
		if (theList.options[t].value!="")
			count++;
		else { // if heading is selected, deselect it, and then select and count all in section
			theList.options[t].selected=false;
			for ( t++; t<theList.options.length && theList.options[t].value!=""; t++) {
				theList.options[t].selected=true;
				count++;
			}
		}
	}

	// disable "export" and "delete" buttons if no tiddlers selected
	document.getElementById("exportStart").disabled=(count==0);
	document.getElementById("exportDelete").disabled=(count==0);
	// show selection count
	var tiddlers = store.getTiddlers('title');
	if (theList.options.length) { clearMessage(); displayMessage(formatExportMessage(count,tiddlers.length)); }

	// if a [command] item, reload list... otherwise, no further refresh needed
	if (selectedIndex>4)  return;

	// clear current list contents
	while (theList.length > 0) { theList.options[0] = null; }
	// add heading and control items to list
	var i=0;
	var indent=String.fromCharCode(160)+String.fromCharCode(160); // two non-breaking spaces
	theList.options[i++]=
		new Option(tiddlers.length+" tiddlers in document", "",false,false);
	theList.options[i++]=
		new Option(((sort=="title"        )?">":indent)+' [by title]', "",false,false);
	theList.options[i++]=
		new Option(((sort=="modified")?">":indent)+' [by date]', "",false,false);
	theList.options[i++]=
		new Option(((sort=="modifier")?">":indent)+' [by author]', "",false,false);
	theList.options[i++]=
		new Option(((sort=="tags"	)?">":indent)+' [by tags]', "",false,false);
	// output the tiddler list
	switch(sort)
		{
		case "title":
			for(var t = 0; t < tiddlers.length; t++)
				theList.options[i++] = new Option(tiddlers[t].title,tiddlers[t].title,false,false);
			break;
		case "modifier":
		case "modified":
			var tiddlers = store.getTiddlers(sort);
			// sort descending for newest date first
			tiddlers.sort(function (a,b) {if(a[sort] == b[sort]) return(0); else return (a[sort] > b[sort]) ? -1 : +1; });
			var lastSection = "";
			for(var t = 0; t < tiddlers.length; t++)
				{
				var tiddler = tiddlers[t];
				var theSection = "";
				if (sort=="modified") theSection=tiddler.modified.toLocaleDateString();
				if (sort=="modifier") theSection=tiddler.modifier;
				// emit a heading row (value=="") whenever the section changes
				if (theSection != lastSection)
					{
					theList.options[i++] = new Option(theSection,"",false,false);
					lastSection = theSection;
					}
				theList.options[i++] = new Option(indent+indent+tiddler.title,tiddler.title,false,false);
				}
			 break;
		case "tags":
			var theTitles = {}; // all tiddler titles, hash indexed by tag value
			var theTags = new Array();
			for(var t=0; t<tiddlers.length; t++) {
				var title=tiddlers[t].title;
				var tags=tiddlers[t].tags;
				if (!tags || !tags.length) {
					// tiddlers without tags are grouped under a synthetic "untagged" section
					if (theTitles["untagged"]==undefined) { theTags.push("untagged"); theTitles["untagged"]=new Array(); }
					theTitles["untagged"].push(title);
				}
				else for(var s=0; s<tags.length; s++) {
					if (theTitles[tags[s]]==undefined) { theTags.push(tags[s]); theTitles[tags[s]]=new Array(); }
					theTitles[tags[s]].push(title);
				}
			}
			theTags.sort();
			for(var tagindex=0; tagindex<theTags.length; tagindex++) {
				var theTag=theTags[tagindex];
				theList.options[i++]=new Option(theTag,"",false,false);
				for(var t=0; t<theTitles[theTag].length; t++)
					theList.options[i++]=new Option(indent+indent+theTitles[theTag][t],theTitles[theTag][t],false,false);
			}
			break;
		}
	theList.selectedIndex=selectedIndex;		  // select current control item
	document.getElementById("exportStart").disabled=true;
	document.getElementById("exportDelete").disabled=true;
	clearMessage(); displayMessage(formatExportMessage(0,tiddlers.length));
}
//}}}

// // list filtering
//{{{
// Convert a filter selector value into a Date:
//   'site'  - parsed from the SiteDate tiddler text (falls back to document.lastModified)
//   'file'  - the document's last-modified time
//   'other' - parsed from the free-text input field with the given element id
//   numeric - N days ago; start-of-day for the start field, end-of-day for the end field
// 'id' distinguishes the start field ('exportStartDate') from the end field.
function getFilterDate(val,id)
{
	var result=0;
	switch (val) {
		case 'site':
			var timestamp=store.getTiddlerText("SiteDate");
			if (!timestamp) timestamp=document.lastModified;
			result=new Date(timestamp);
			break;
		case 'file':
			result=new Date(document.lastModified);
			break;
		case 'other':
			result=new Date(document.getElementById(id).value);
			break;
		default: // numeric: N days ago (panel offers today=0, yesterday=1, a week=7, a month=30)
			// 'now-=tz' coerces the Date to a millisecond count shifted to local time
			var now=new Date(); var tz=now.getTimezoneOffset()*60000; now-=tz;
			var oneday=86400000;
			if (id=='exportStartDate')
				result=new Date((Math.floor(now/oneday)-val)*oneday+tz); // local midnight, N days back
			else
				result=new Date((Math.floor(now/oneday)-val+1)*oneday+tz-1); // 1ms before the following local midnight
			break;
	}
	// DEBUG alert('getFilterDate('+val+','+id+')=='+result+"\nnow="+now);
	return result;
}

// Select listbox entries that match the active filter criteria (date range,
// tag expression, title/text substring). Returns the number of tiddlers
// selected, or -1 when no filter is enabled or the date range is inverted
// (in which case the filter panel is re-opened / an alert is shown).
function filterExportList()
{
	var theList  = document.getElementById("exportList"); if (!theList) return -1;

	// read the four filter toggles and their associated values
	var filterStart=document.getElementById("exportFilterStart").checked;
	var val=document.getElementById("exportFilterStartBy").value;
	var startDate=getFilterDate(val,'exportStartDate');

	var filterEnd=document.getElementById("exportFilterEnd").checked;
	var val=document.getElementById("exportFilterEndBy").value;
	var endDate=getFilterDate(val,'exportEndDate');

	var filterTags=document.getElementById("exportFilterTags").checked;
	var tags=document.getElementById("exportTags").value;

	var filterText=document.getElementById("exportFilterText").checked;
	var text=document.getElementById("exportText").value;

	if (!(filterStart||filterEnd||filterTags||filterText)) {
		alert("Please set the selection filter");
		document.getElementById('exportFilterPanel').style.display="block";
		return -1;
	}
	if (filterStart&&filterEnd&&(startDate>endDate)) {
		var msg="starting date/time:\n"
		msg+=startDate.toLocaleString()+"\n";
		msg+="is later than ending date/time:\n"
		msg+=endDate.toLocaleString()
		alert(msg);
		return -1;
	}

	// if filter by tags, set up conditional expression
	if (filterTags) {
		var all = store.getTags(); // get list of all tags
		for (var i=0; i<all.length; i++) all[i]=all[i][0]; // remove tag counts
		// convert "tag1 AND ( tag2 OR NOT tag3 )"
		// into javascript expression containing regexp tests:
		// "/\~tag1\~/.test(...) && ( /\~tag2\~/.test(...) || ! /\~tag2\~/.test(...) )"
		var c=tags;
		c = c.replace(/[\[\]]/g,""); // remove [[...]] quoting around tagvalues
		// change AND/OR/NOT/parens to javascript operators and delimit terms with "~"
		c = c.replace(/\sand\s/ig,"~&&~");
		c = c.replace(/\sor\s/ig,"~||~");
		c = c.replace(/(\s)?not([\s\(])/ig,"~!~$2");
		c = c.replace(/([\(\)])/ig,"~$1~");
		// change existing tags to regexp tests and non-existing tags to "false"
		var terms=c.split("~");
		for (var i=0; i<terms.length; i++) { var t=terms[i];
			if (/(&&)|(\|\|)|[!\(\)]/.test(t) || t=="") continue; // skip operators/parens/spaces
			terms[i]=!all.contains(t)?"false":("/\\~"+t+"\\~/.test(tiddlertags)");
		}
		c=terms.join(" ");
	}
	// test one tiddler 't' against the compiled tag expression 'c'
	function matchTags(t,c) {
		if (!c||!c.trim().length) return false;
		// assemble tags from tiddler into string "~tag1~tag2~tag3~"
		var tiddlertags = "~"+t.tags.join("~")+"~";
		// eval string against boolean test expression
		// NOTE(review): eval() of an expression derived from user-entered tag text;
		// unknown terms are rewritten to "false" above, but handle with care
		try { if(eval(c)) return true; }
		catch(e) { displayMessage(e.toString()); }
		return false;
	}
	
	// scan list and select tiddlers that match all applicable criteria
	var total=0;
	var count=0;
	for (var i=0; i<theList.options.length; i++) {
		// get item, skip non-tiddler list items (section headings)
		var opt=theList.options[i]; if (opt.value=="") continue;
		// get tiddler, skip missing tiddlers (this should NOT happen)
		var tiddler=store.getTiddler(opt.value); if (!tiddler) continue; 
		var sel=true;
		if ( (filterStart && tiddler.modified<startDate)
		|| (filterEnd && tiddler.modified>endDate)
		|| (filterTags && !matchTags(tiddler,c))
		|| (filterText && (tiddler.text.indexOf(text)==-1) && (tiddler.title.indexOf(text)==-1)))
			sel=false;
		opt.selected=sel;
		count+=sel?1:0;
		total++;
	}
	return count;
}
//}}}

// // OUTPUT FORMATTING AND FILE I/O
//{{{
// Load this TiddlyWiki document from disk, reset its generated markup blocks,
// and return everything up to and including the startSaveArea marker -- the
// HTML "header" for a standalone TW-format export file.
// Returns null when the file can't be read; returns undefined when the store
// area markers are missing (NOTE(review): inconsistent with the null case).
function exportTWHeader()
{
	// get the TiddlyWiki core code source
	var sourcefile=getLocalPath(document.location.href);
	var source=loadFile(sourcefile);
	if(source==null) { alert(config.messages.cantSaveError); return null; }
	// reset existing HTML source markup
	source=updateMarkupBlock(source,"PRE-HEAD");
	source=updateMarkupBlock(source,"POST-HEAD");
	source=updateMarkupBlock(source,"PRE-BODY");
	source=updateMarkupBlock(source,"POST-BODY");
	// find store area
	var posOpeningDiv=source.indexOf(startSaveArea);
	var posClosingDiv=source.lastIndexOf(endSaveArea);
	if((posOpeningDiv==-1)||(posClosingDiv==-1))
		{ alert(config.messages.invalidFileError.format([sourcefile])); return; }
	// return everything up to store area
	return source.substr(0,posOpeningDiv+startSaveArea.length);
}

// Counterpart to exportTWHeader(): reload and clean the document source, then
// return everything from the endSaveArea marker onward -- the HTML "footer"
// for a standalone TW-format export file.
// Returns null when the file can't be read; returns undefined when the store
// area markers are missing (NOTE(review): inconsistent with the null case).
function exportTWFooter()
{
	// get the TiddlyWiki core code source
	var sourcefile=getLocalPath(document.location.href);
	var source=loadFile(sourcefile);
	if(source==null) { alert(config.messages.cantSaveError); return null; }
	// reset existing HTML source markup
	source=updateMarkupBlock(source,"PRE-HEAD");
	source=updateMarkupBlock(source,"POST-HEAD");
	source=updateMarkupBlock(source,"PRE-BODY");
	source=updateMarkupBlock(source,"POST-BODY");
	// find store area
	var posOpeningDiv=source.indexOf(startSaveArea);
	var posClosingDiv=source.lastIndexOf(endSaveArea);
	if((posOpeningDiv==-1)||(posClosingDiv==-1))
		{ alert(config.messages.invalidFileError.format([sourcefile])); return; }
	// return everything after store area
	return source.substr(posClosingDiv);
}

// Build the header lines for a "DIV" (plain store-area) export: inline CSS,
// a human-readable summary block (source URL, title, author, notes), and the
// opening <div id="storeArea">. Returns an array of HTML lines.
function exportDIVHeader()
{
	var out=[];
	var now = new Date();
	var title = convertUnicodeToUTF8(wikifyPlain("SiteTitle").htmlEncode());
	var subtitle = convertUnicodeToUTF8(wikifyPlain("SiteSubtitle").htmlEncode());
	var user = convertUnicodeToUTF8(config.options.txtUserName.htmlEncode());
	var twver = version.major+"."+version.minor+"."+version.revision;
	// plugin version from this plugin's own version.extensions entry
	var pver = version.extensions.exportTiddlers.major+"."
		+version.extensions.exportTiddlers.minor+"."+version.extensions.exportTiddlers.revision;
	out.push("<html><body>");
	out.push("<style type=\"text/css\">");
	out.push("#storeArea {display:block;margin:1em;}");
	out.push("#storeArea div");
	out.push("{padding:0.5em;margin:1em;border:2px solid black;height:10em;overflow:auto;}");
	out.push("#javascriptWarning");
	out.push("{width:100%;text-align:left;background-color:#eeeeee;padding:1em;}");
	out.push("</style>");
	// summary banner shown when the export file is viewed directly
	out.push("<div id=\"javascriptWarning\">");
	out.push("TiddlyWiki export file<br>");
	out.push("Source"+": <b>"+convertUnicodeToUTF8(document.location.href)+"</b><br>");
	out.push("Title: <b>"+title+"</b><br>");
	out.push("Subtitle: <b>"+subtitle+"</b><br>");
	out.push("Created: <b>"+now.toLocaleString()+"</b> by <b>"+user+"</b><br>");
	out.push("TiddlyWiki "+twver+" / "+"ExportTiddlersPlugin "+pver+"<br>");
	out.push("Notes:<hr><pre>"+document.getElementById("exportNotes").value.replace(/\n/g,"<br>")+"</pre>");
	out.push("</div>");
	out.push("<div id=\"storeArea\">");
	return out;
}

// Close the storeArea DIV opened by exportDIVHeader(), emitting the
// POST-BODY markers and document close tags. Returns an array of lines.
function exportDIVFooter()
{
	var markers="<!--POST-BODY-START-->\n<!--POST-BODY-END-->";
	return ["</div>"+markers+"</body></html>"];
}

// Build the header lines for an "XML" (RSS 2.0) export: XML declaration,
// <rss>/<channel> open tags and channel metadata derived from the site
// title/subtitle/URL and the current user. Returns an array of lines.
function exportXMLHeader()
{
	var out=[];
	var now = new Date();
	var u = store.getTiddlerText("SiteUrl",null);
	var title = convertUnicodeToUTF8(wikifyPlain("SiteTitle").htmlEncode());
	var subtitle = convertUnicodeToUTF8(wikifyPlain("SiteSubtitle").htmlEncode());
	var user = convertUnicodeToUTF8(config.options.txtUserName.htmlEncode());
	var twver = version.major+"."+version.minor+"."+version.revision;
	var pver = version.extensions.exportTiddlers.major+"."
		+version.extensions.exportTiddlers.minor+"."+version.extensions.exportTiddlers.revision;
	// split declaration avoids the "?>" sequence confusing naive parsers
	out.push("<" + "?xml version=\"1.0\"?" + ">");
	out.push("<rss version=\"2.0\">");
	out.push("<channel>");
	out.push("<title>" + title + "</title>");
	if(u) out.push("<link>" + convertUnicodeToUTF8(u.htmlEncode()) + "</link>");
	out.push("<description>" + subtitle + "</description>");
	out.push("<language>en-us</language>");
	out.push("<copyright>Copyright " + now.getFullYear() + " " + user + "</copyright>");
	out.push("<pubDate>" + now.toGMTString() + "</pubDate>");
	out.push("<lastBuildDate>" + now.toGMTString() + "</lastBuildDate>");
	out.push("<docs>http://blogs.law.harvard.edu/tech/rss</docs>");
	out.push("<generator>TiddlyWiki "+twver+" plus ExportTiddlersPlugin "+pver+"</generator>");
	return out;
}

// Close the RSS channel and document elements opened by exportXMLHeader().
// Returns an array of lines, matching the other export footer helpers.
function exportXMLFooter()
{
	var closing=["</channel>","</rss>"];
	return [closing.join("")];
}

// Serialize the selected tiddlers from 'list' into format 'fmt' ("TW", "DIV"
// or "XML"), returning an array of strings (one entry per tiddler). If the
// 'target' file already contains a readable TiddlyWiki store, offer to merge
// its existing tiddlers (those not being re-exported) into the output.
function exportData(target,list,fmt)
{
	// render a single tiddler: s=store, f=format, t=tiddler
	function getData(s,f,t) { var r="";
		switch (f) {
			case "TW": r=s.getSaver().externalizeTiddler(s,t); break;
			case "DIV": r=t.title+"\n"+s.getSaver().externalizeTiddler(s,t); break;
			case "XML": r=t.saveToRss(store.getTiddlerText("SiteUrl","")); break;
		}
		return convertUnicodeToUTF8(r);
	}

	var out=[]; var tids=[];
	// get selected tiddlers
	for (var i=0; i<list.options.length; i++) {
		var opt=list.options[i]; if (!opt.selected||!opt.value.length) continue;
		var tid=store.getTiddler(opt.value); if (!tid) continue;
		tids.push(tid.title);
		out.push(getData(store,fmt,tid));
	}
	var count=out.length;
	// merge with existing tiddlers
	var text=loadFile(target);
	if (text && text.length) {
		var msg=target+"\nalready contains tiddler definitions.\n";
		msg+="\nPress OK to add new/revised tiddlers to current file contents.";
		msg+="\nPress Cancel to completely replace file contents";
		var remoteStore=new TiddlyWiki();
		if (remoteStore.importTiddlyWiki(text) && confirm(msg)) {
			// keep existing tiddlers that are not superseded by this export
			var existing=remoteStore.getTiddlers("title");
			for (var i=0; i<existing.length; i++)
				if (!tids.contains(existing[i].title))
					out.push(getData(remoteStore,fmt,existing[i]));
			var msg="Merged %0 new/revised tiddlers and %1 existing tiddlers";
			displayMessage(msg.format([count,out.length-count]));
		}
	}
	return out;
}
//}}}

// // exportTiddlers(): output selected data to local file
//{{{
// Main export driver: reads the target filename and format ("TW"/"DIV"/"XML")
// from the panel, assembles header + selected tiddler data + footer, and
// writes the result with saveFile(). Reports success/failure in the message area.
function exportTiddlers()
{
	clearMessage();
	var list  = document.getElementById("exportList"); if (!list) return;
	var fmt = document.getElementById("exportFormat").value;
	var target = document.getElementById("exportFilename").value.trim();
	if (!target.length) {
		displayMessage("A local target path/filename is required",target);
		return;
	}
	switch (fmt) {
		case "TW":	var head=exportTWHeader(); break;
		case "DIV":	var head=exportDIVHeader(); break;
		case "XML":	var head=exportXMLHeader(); break;
	}
	var theData=exportData(target,list,fmt);
	var c=theData.length; // tiddler count reported to the user (before merged extras)
	switch (fmt) {
		case "TW":	var foot=exportTWFooter(); break;
		case "DIV":	var foot=exportDIVFooter(); break;
		case "XML":	var foot=exportXMLFooter(); break;
	}
	var out=[]; var txt=out.concat(head,theData,foot).join("\n");
	var msg="An error occurred while saving to "+target;
	if (saveFile(target,txt)) msg=c+" tiddler"+((c!=1)?"s":"")+" written to "+target;
	displayMessage(msg,"file:///"+target);
}
//}}}

// // exportDeleteTiddlers(): delete selected tiddlers from file
//{{{
// Delete every selected tiddler from the document, after confirmation.
// Tiddlers tagged 'systemConfig' (plugins) get an extra per-tiddler warning,
// since removing them can break the document. Notifications are suspended
// during the batch and fired once at the end.
// Fix: the loop indices 'i' and 't' were implicit globals (no 'var'),
// which leaks/clobbers globals in sloppy mode; they are now local.
function exportDeleteTiddlers()
{
	var list=document.getElementById('exportList'); if (!list) return;
	// collect titles of selected, non-heading list entries
	var tids=[];
	for (var i=0;i<list.length;i++)
		if (list.options[i].selected && list.options[i].value.length)
			tids.push(list.options[i].value);
	if (!confirm("Are you sure you want to delete these tiddlers:\n\n"+tids.join(', '))) return;
	store.suspendNotifications(); // batch the removals; notify once below
	for (var t=0;t<tids.length;t++) {
		var tid=store.getTiddler(tids[t]); if (!tid) continue;
		if (tid.tags.contains("systemConfig"))
			if (!confirm("'"+tid.title+"' is tagged with 'systemConfig'.\n\nRemoving this tiddler may cause unexpected results.  Are you sure?"))
				continue;
		store.removeTiddler(tid.title);
		story.closeTiddler(tid.title);
	}
	store.resumeNotifications();
	alert(tids.length+" tiddlers deleted");
	refreshExportList(0); // reload listbox
	store.notifyAll(); // update page display
}
//}}}
/***
|''Name:''|FieldsEditorPlugin|
|''Description:''|//create//, //edit//, //view// and //delete// commands in toolbar <<toolbar fields>>.|
|''Version:''|1.0.2|
|''Date:''|Dec 21,2007|
|''Source:''|http://visualtw.ouvaton.org/VisualTW.html|
|''Author:''|Pascal Collin|
|''License:''|[[BSD open source license|License]]|
|''~CoreVersion:''|2.2.0|
|''Browser:''|Firefox 2.0; InternetExplorer 6.0, others|
!Demo:
On [[homepage|http://visualtw.ouvaton.org/VisualTW.html]], see [[FieldEditor example]]
!Installation:
*import this tiddler from [[homepage|http://visualtw.ouvaton.org/VisualTW.html]] (tagged as systemConfig)
*save and reload
*optionally: add the following css text in your StyleSheet : {{{#popup tr.fieldTableRow td {padding:1px 3px 1px 3px;}}}}
!Code
***/

//{{{

// Build the popup for the "fields" toolbar command: a ListView table showing
// every extended field of the tiddler with edit/rename/delete action macros,
// plus a trailing blank row carrying the "create new field" action.
config.commands.fields.handlePopup = function(popup,title) {
	var tiddler = store.fetchTiddler(title);
	if(!tiddler)
		return;
	var fields = {};
	store.forEachField(tiddler,function(tiddler,fieldName,value) {fields[fieldName] = value;},true);
	var items = [];
	for(var t in fields) {
		// action macros carry the (escaped) tiddler title and field name as parameters
		var editCommand = "<<untiddledCall editFieldDialog "+escape(title)+" "+escape(t)+">>";
		var deleteCommand = "<<untiddledCall deleteField "+escape(title)+" "+escape(t)+">>";
		var renameCommand = "<<untiddledCall renameField "+escape(title)+" "+escape(t)+">>";
		items.push({field: t,value: fields[t], actions: editCommand+renameCommand+deleteCommand});
	}
	items.sort(function(a,b) {return a.field < b.field ? -1 : (a.field == b.field ? 0 : +1);});
	var createNewCommand = "<<untiddledCall createField "+escape(title)+">>";
	items.push({field : "", value : "", actions:createNewCommand });
	// NOTE(review): the "create" row is always pushed, so items.length > 0 always
	// holds here and the emptyText branch appears unreachable
	if(items.length > 0)
		ListView.create(popup,items,this.listViewTemplate);
	else
		createTiddlyElement(popup,"div",null,null,this.emptyText);
}

// ListView layout for the fields popup: field name (plain string), action
// links (wikified macros) and field value (wikified).
config.commands.fields.listViewTemplate = {
	columns: [
		{name: 'Field', field: 'field', title: "Field", type: 'String'},
		{name: 'Actions', field: 'actions', title: "Actions", type: 'WikiText'},
		{name: 'Value', field: 'value', title: "Value", type: 'WikiText'}
	],
	rowClasses: [
			{className: 'fieldTableRow', field: 'actions'}
	],
	buttons: [	//can't use button for selected then delete, because click on checkbox will hide the popup
	]
}

config.macros.untiddledCall = {  // when called from listview, tiddler is unset, so we need to pass tiddler as parameter
	// <<untiddledCall targetMacro escapedTitle ...rest>>:
	// looks up the target macro and invokes it with the tiddler resolved from
	// the (escaped) title, forwarding the remaining parameters.
	handler : function(place,macroName,params,wikifier,paramString) {
		var macroName = params.shift(); // shadows the parameter: first param is the real macro name
		if (macroName) var macro = config.macros[macroName];
		var title = params.shift();
		if (title) var tiddler = store.getTiddler(unescape(title));
		if (macro) macro.handler(place,macroName,params,wikifier,paramString,tiddler);		
	}
}

config.macros.deleteField = {
	// <<deleteField>> via untiddledCall: params[0] is the URL-escaped field
	// name; renders a "delete" button that removes that extended field from
	// the given tiddler. No-op when the wiki is read-only.
	// Fix: 'fieldName' was assigned without 'var', creating an implicit global.
	handler : function(place,macroName,params,wikifier,paramString,tiddler) {
		if(!readOnly && params[0]) {
			var fieldName = unescape(params[0]);
			var btn = createTiddlyButton(place,"delete", "delete "+fieldName,this.onClickDeleteField);
			// stash context on the button element for the click handler
			btn.setAttribute("title",tiddler.title);
			btn.setAttribute("fieldName", fieldName);
		}
	},
	// click handler: 'this' is the button element created above
	onClickDeleteField : function() {
		var title=this.getAttribute("title");
		var fieldName=this.getAttribute("fieldName");
		var tiddler = store.getTiddler(title);
		if (tiddler && fieldName && confirm("delete field " + fieldName+" from " + title +" tiddler ?")) {
			delete tiddler.fields[fieldName];
			// re-save to persist the field removal, then refresh the display
			store.saveTiddler(tiddler.title,tiddler.title,tiddler.text,tiddler.modifier,tiddler.modified,tiddler.tags,tiddler.fields);
			story.refreshTiddler(title,"ViewTemplate",true);
		}
		return false;
	}
}

config.macros.createField = {
	// <<createField>> via untiddledCall: renders a "create new" button that
	// prompts for a field name and value and adds them to the tiddler.
	// No-op when the wiki is read-only.
	handler : function(place,macroName,params,wikifier,paramString,tiddler) {
		if(!readOnly) {
			var btn = createTiddlyButton(place,"create new", "create a new field",this.onClickCreateField);
			// stash the tiddler title on the button for the click handler
			btn.setAttribute("title",tiddler.title);
		}
	},
	// click handler: 'this' is the button element created above
	onClickCreateField : function() {
		var title=this.getAttribute("title");
		var tiddler = store.getTiddler(title);
		if (tiddler) {
			var fieldName = prompt("Field name","");
			if (store.getValue(tiddler,fieldName)) {
				window.alert("This field already exists.");
			}
			else if (fieldName) {
				// NOTE(review): cancelling this prompt stores null as the field value
				var v = prompt("Field value","");
				tiddler.fields[fieldName]=v;
				store.saveTiddler(tiddler.title,tiddler.title,tiddler.text,tiddler.modifier,tiddler.modified,tiddler.tags,tiddler.fields);
				story.refreshTiddler(title,"ViewTemplate",true);
			}
		}
		return false;
	}
}

config.macros.editFieldDialog = {
	// <<editFieldDialog>> via untiddledCall: params[0] is the URL-escaped
	// field name; renders an "edit" button that prompts for a new value.
	// No-op when the wiki is read-only.
	// Fix: 'fieldName' was assigned without 'var', creating an implicit global.
	handler : function(place,macroName,params,wikifier,paramString,tiddler) {
		if(!readOnly && params[0]) {
			var fieldName = unescape(params[0]);
			var btn = createTiddlyButton(place,"edit", "edit this field",this.onClickEditFieldDialog);
			// stash context on the button element for the click handler
			btn.setAttribute("title",tiddler.title);
			btn.setAttribute("fieldName", fieldName);
		}
	},
	// click handler: 'this' is the button element created above
	onClickEditFieldDialog : function() {
		var title=this.getAttribute("title");
		var tiddler = store.getTiddler(title);
		var fieldName=this.getAttribute("fieldName");
		if (tiddler && fieldName) {
			var value = tiddler.fields[fieldName];
			value = value ? value : "";
			// prompt() is single-line: warn before editing a multi-line value
			var lines = value.match(/\n/mg);
			lines = lines ? true : false;
			if (!lines || confirm("This field contains more than one line. Only the first line will be kept if you edit it here. Proceed ?")) {
				// NOTE(review): cancelling this prompt stores null as the field value
				var v = prompt("Field value",value);
				tiddler.fields[fieldName]=v;
				store.saveTiddler(tiddler.title,tiddler.title,tiddler.text,tiddler.modifier,tiddler.modified,tiddler.tags,tiddler.fields);
				story.refreshTiddler(title,"ViewTemplate",true);
			}
		}
		return false;
	}
}

config.macros.renameField = {
	// <<renameField>> via untiddledCall: params[0] is the URL-escaped field
	// name; renders a "rename" button that moves the value to a new field name.
	// No-op when the wiki is read-only.
	// Fix: 'fieldName' was assigned without 'var', creating an implicit global.
	handler : function(place,macroName,params,wikifier,paramString,tiddler) {
		if(!readOnly && params[0]) {
			var fieldName = unescape(params[0]);
			var btn = createTiddlyButton(place,"rename", "rename "+fieldName,this.onClickRenameField);
			// stash context on the button element for the click handler
			btn.setAttribute("title",tiddler.title);
			btn.setAttribute("fieldName", fieldName);
		}
	},
	// click handler: 'this' is the button element created above
	onClickRenameField : function() {
		var title=this.getAttribute("title");
		var fieldName=this.getAttribute("fieldName");
		var tiddler = store.getTiddler(title);
		if (tiddler && fieldName) {
			var newName = prompt("Rename " + fieldName + " as ?", fieldName);
			if (newName) {
				// copy value under the new name, drop the old entry, persist
				tiddler.fields[newName]=tiddler.fields[fieldName];
				delete tiddler.fields[fieldName];
				store.saveTiddler(tiddler.title,tiddler.title,tiddler.text,tiddler.modifier,tiddler.modified,tiddler.tags,tiddler.fields);
				story.refreshTiddler(title,"ViewTemplate",true);
			}
		}
		return false;
	}
}

// Shadow stylesheet for the fields-editor popup table, re-rendered whenever
// the tiddler changes (hence the notification hook).
config.shadowTiddlers.StyleSheetFieldsEditor = [
	"/*{{{*/",
	".fieldTableRow td {padding : 1px 3px}",
	".fieldTableRow .button {border:0; padding : 0 0.2em}",
	"/*}}}*/"
].join("\n");
store.addNotification("StyleSheetFieldsEditor", refreshStyles);

//}}}
/***
|''Name:''|ForEachTiddlerPlugin|
|''Version:''|1.0.8 (2007-04-12)|
|''Source:''|http://tiddlywiki.abego-software.de/#ForEachTiddlerPlugin|
|''Author:''|UdoBorkowski (ub [at] abego-software [dot] de)|
|''Licence:''|[[BSD open source license (abego Software)|http://www.abego-software.de/legal/apl-v10.html]]|
|''Copyright:''|&copy; 2005-2007 [[abego Software|http://www.abego-software.de]]|
|''TiddlyWiki:''|1.2.38+, 2.0|
|''Browser:''|Firefox 1.0.4+; Firefox 1.5; InternetExplorer 6.0|
!Description

Create customizable lists, tables etc. for your selections of tiddlers. Specify the tiddlers to include and their order through a powerful language.

''Syntax:'' 
|>|{{{<<}}}''forEachTiddler'' [''in'' //tiddlyWikiPath//] [''where'' //whereCondition//] [''sortBy'' //sortExpression// [''ascending'' //or// ''descending'']] [''script'' //scriptText//] [//action// [//actionParameters//]]{{{>>}}}|
|//tiddlyWikiPath//|The filepath to the TiddlyWiki the macro should work on. When missing the current TiddlyWiki is used.|
|//whereCondition//|(quoted) JavaScript boolean expression. May refer to the built-in variables {{{tiddler}}} and  {{{context}}}.|
|//sortExpression//|(quoted) JavaScript expression returning "comparable" objects (using '{{{<}}}','{{{>}}}','{{{==}}}'). May refer to the built-in variables {{{tiddler}}} and  {{{context}}}.|
|//scriptText//|(quoted) JavaScript text. Typically defines JavaScript functions that are called by the various JavaScript expressions (whereClause, sortClause, action arguments,...)|
|//action//|The action that should be performed on every selected tiddler, in the given order. By default the actions [[addToList|AddToListAction]] and [[write|WriteAction]] are supported. When no action is specified [[addToList|AddToListAction]]  is used.|
|//actionParameters//|(action specific) parameters the action may refer while processing the tiddlers (see action descriptions for details). <<tiddler [[JavaScript in actionParameters]]>>|
|>|~~Syntax formatting: Keywords in ''bold'', optional parts in [...]. 'or' means that exactly one of the two alternatives must exist.~~|

For details see [[ForEachTiddlerMacro]] and [[ForEachTiddlerExamples]].

!Code
***/
//{{{

	
//============================================================================
//============================================================================
//		   ForEachTiddlerPlugin
//============================================================================
//============================================================================

// Only install once
if (!version.extensions.ForEachTiddlerPlugin) {

// Ensure the shared abego namespace object exists.
window.abego = window.abego || {};

// Plugin metadata; its presence also serves as the install-once guard.
version.extensions.ForEachTiddlerPlugin = {
	major: 1,
	minor: 0,
	revision: 8,
	date: new Date(2007,3,12),
	source: "http://tiddlywiki.abego-software.de/#ForEachTiddlerPlugin",
	licence: "[[BSD open source license (abego Software)|http://www.abego-software.de/legal/apl-v10.html]]",
	copyright: "Copyright (c) abego Software GmbH, 2005-2007 (www.abego-software.de)"
};

// Backward compatibility with TW 1.2.x: provide forEachTiddler when the
// core does not. Invokes callback(title, tiddler) for every tiddler in the
// store, with `this` bound to the TiddlyWiki instance.
if (!TiddlyWiki.prototype.forEachTiddler) {
	TiddlyWiki.prototype.forEachTiddler = function(callback) {
		for (var title in this.tiddlers)
			callback.call(this, title, this.tiddlers[title]);
	};
}

//============================================================================
// forEachTiddler Macro
//============================================================================

// Version record for the forEachTiddler macro itself (kept separate from the
// plugin-level version.extensions.ForEachTiddlerPlugin entry above).
version.extensions.forEachTiddler = {
	major: 1, minor: 0, revision: 8, date: new Date(2007,3,12), provider: "http://tiddlywiki.abego-software.de"};

// ---------------------------------------------------------------------------
// Configurations and constants 
// ---------------------------------------------------------------------------

// Macro registration data: the label and tooltip TiddlyWiki shows for the
// macro, plus the table of supported actions. Each action's handler function
// is attached to its (initially empty) entry further down in this file.
config.macros.forEachTiddler = {
	 // Standard Properties
	 label: "forEachTiddler",
	 prompt: "Perform actions on a (sorted) selection of tiddlers",

	 // Supported actions; handlers are assigned below.
	 actions: {
		 addToList: {},
		 write: {}
	 }
};

// ---------------------------------------------------------------------------
//  The forEachTiddler Macro Handler 
// ---------------------------------------------------------------------------

// Walks up the DOM from element `e` to the nearest ancestor carrying the
// "tiddler" CSS class and returns the corresponding Tiddler from the store,
// or null when no such ancestor (or no matching tiddler title) exists.
config.macros.forEachTiddler.getContainingTiddler = function(e) {
	var node = e;
	while (node && !hasClass(node, "tiddler"))
		node = node.parentNode;
	if (!node) return null;
	var title = node.getAttribute("tiddler");
	return title ? store.getTiddler(title) : null;
};

// The forEachTiddler macro handler.
//
// Parses the macro parameters into clauses (in / where / sortBy / script /
// action) and delegates the work to performMacro. Parse errors and exceptions
// raised during processing are rendered into `place` via handleError.
config.macros.forEachTiddler.handler = function(place,macroName,params,wikifier,paramString,tiddler) {
	// config.macros.forEachTiddler.traceMacroCall(place,macroName,params,wikifier,paramString,tiddler);

	// When no tiddler is supplied (e.g. macro evaluated from a template),
	// derive it from the DOM element containing the macro.
	if (!tiddler) tiddler = config.macros.forEachTiddler.getContainingTiddler(place);
	// --- Parsing ------------------------------------------

	var i = 0; // index running over the params
	// Parse the optional "in" clause: path of the TiddlyWiki file to read.
	var tiddlyWikiPath = undefined;
	if ((i < params.length) && params[i] == "in") {
		i++;
		if (i >= params.length) {
			this.handleError(place, "TiddlyWiki path expected behind 'in'.");
			return;
		}
		tiddlyWikiPath = this.paramEncode((i < params.length) ? params[i] : "");
		i++;
	}

	// Parse the optional "where" clause; the default "true" selects all tiddlers.
	var whereClause ="true";
	if ((i < params.length) && params[i] == "where") {
		i++;
		whereClause = this.paramEncode((i < params.length) ? params[i] : "");
		i++;
	}

	// Parse the optional "sortBy" clause and its ascending/descending flag.
	var sortClause = null;
	var sortAscending = true; 
	if ((i < params.length) && params[i] == "sortBy") {
		i++;
		if (i >= params.length) {
			this.handleError(place, "sortClause missing behind 'sortBy'.");
			return;
		}
		sortClause = this.paramEncode(params[i]);
		i++;

		if ((i < params.length) && (params[i] == "ascending" || params[i] == "descending")) {
			 sortAscending = params[i] == "ascending";
			 i++;
		}
	}

	// Parse the optional "script" clause (JavaScript text, typically defining
	// helper functions used by the where/sort expressions).
	var scriptText = null;
	if ((i < params.length) && params[i] == "script") {
		i++;
		scriptText = this.paramEncode((i < params.length) ? params[i] : "");
		i++;
	}

	// Parse the action name.
	// When we are already at the end use the default action ("addToList").
	var actionName = "addToList";
	if (i < params.length) {
	   if (!config.macros.forEachTiddler.actions[params[i]]) {
			this.handleError(place, "Unknown action '"+params[i]+"'.");
			return;
		} else {
			actionName = params[i]; 
			i++;
		}
	} 
	
	// All remaining params belong to the action
	// (the parsing is done inside the individual action implementation.)
	var actionParameter = params.slice(i);


	// --- Processing ------------------------------------------
	try {
		this.performMacro({
				place: place, 
				inTiddler: tiddler,
				whereClause: whereClause, 
				sortClause: sortClause, 
				sortAscending: sortAscending, 
				actionName: actionName, 
				actionParameter: actionParameter, 
				scriptText: scriptText, 
				tiddlyWikiPath: tiddlyWikiPath});

	} catch (e) {
		this.handleError(place, e);
	}
};

// Returns an object with properties "tiddlers" and "context":
// "tiddlers" holds the (sorted) tiddlers selected by the parameter,
// "context" the context of the execution of the macro.
//
// The action is not yet performed.
//
// @parameter see performMacro
//
config.macros.forEachTiddler.getTiddlersAndContext = function(parameter) {
	var context = config.macros.forEachTiddler.createContext(
		parameter.place, parameter.whereClause, parameter.sortClause,
		parameter.sortAscending, parameter.actionName, parameter.actionParameter,
		parameter.scriptText, parameter.tiddlyWikiPath, parameter.inTiddler);

	// Operate on an external TiddlyWiki file when a path was given,
	// otherwise on the current store.
	var wiki = parameter.tiddlyWikiPath ? this.loadTiddlyWiki(parameter.tiddlyWikiPath) : store;
	context["tiddlyWiki"] = wiki;

	// Select the tiddlers matching the whereClause.
	var selected = this.findTiddlers(parameter.whereClause, context, wiki);
	context["tiddlers"] = selected;

	// Sort in place only when a sort clause was given.
	if (parameter.sortClause)
		this.sortTiddlers(selected, parameter.sortClause, parameter.sortAscending, context);

	return {tiddlers: selected, context: context};
};

// Convenience wrapper: returns only the (sorted) tiddlers selected by the
// parameter, discarding the execution context. The action is not performed.
//
// @parameter see performMacro
//
config.macros.forEachTiddler.getTiddlers = function(parameter) {
	var selection = this.getTiddlersAndContext(parameter);
	return selection.tiddlers;
};

// Performs the macro with the given parameter.
//
// @param parameter holds the parameter of the macro as separate properties.
//   Supported properties: place, whereClause, sortClause, sortAscending,
//   actionName, actionParameter, scriptText, tiddlyWikiPath.
//   All properties are optional; for most actions `place` must be defined.
//
config.macros.forEachTiddler.performMacro = function(parameter) {
	var selection = this.getTiddlersAndContext(parameter);

	// Resolve the action (default "addToList"); report unknown names.
	var name = parameter.actionName ? parameter.actionName : "addToList";
	var action = config.macros.forEachTiddler.actions[name];
	if (!action) {
		this.handleError(parameter.place, "Unknown action '"+name+"'.");
		return;
	}

	// NOTE: the handler is deliberately invoked unbound, preserving the
	// original behavior — `this` inside the handler is not the action object.
	var invoke = action.handler;
	invoke(parameter.place, selection.tiddlers, parameter.actionParameter, selection.context);
};

// ---------------------------------------------------------------------------
//  The actions 
// ---------------------------------------------------------------------------

// Internal.
//
// --- The addToList Action -----------------------------------------------
//
// Renders the selected tiddlers as an unordered list of tiddly-links.
// This action accepts no parameters of its own.
config.macros.forEachTiddler.actions.addToList.handler = function(place, tiddlers, parameter, context) {
	var p = 0;

	// Any parameter at all is an error for this action.
	if (parameter.length > p) {
		config.macros.forEachTiddler.createExtraParameterErrorElement(place, "addToList", parameter, p);
		return;
	}

	// Emit one <li> with a link per selected tiddler, in order.
	var listElem = document.createElement("ul");
	place.appendChild(listElem);
	for (var idx = 0; idx < tiddlers.length; idx++) {
		var itemElem = document.createElement("li");
		listElem.appendChild(itemElem);
		createTiddlyLink(itemElem, tiddlers[idx].title, true);
	}
};

// Parses an optional named parameter of the form "<name> <value>" at index i.
// Returns the decoded value when parameter[i] equals name, otherwise null.
// Throws a (string) error when the name is present but its value is missing.
abego.parseNamedParameter = function(name, parameter, i) {
	// Removed dead local `beginExpression` (copy-paste residue from the
	// write-action handler); it was assigned null and never used.
	if ((i < parameter.length) && parameter[i] == name) {
		i++;
		if (i >= parameter.length) {
			throw "Missing text behind '%0'".format([name]);
		}
		return config.macros.forEachTiddler.paramEncode(parameter[i]);
	}
	return null;
};

// Internal.
//
// --- The write Action ---------------------------------------------------
//
config.macros.forEachTiddler.actions.write.handler = function(place, tiddlers, parameter, context) {
	// Parse the parameter
	var p = 0;
	if (p >= parameter.length) {
		this.handleError(place, "Missing expression behind 'write'.");
		return;
	}

	var textExpression = config.macros.forEachTiddler.paramEncode(parameter[p]);
	p++;

	// Parse the "begin" option
	var beginExpression = abego.parseNamedParameter("begin", parameter, p);
	if (beginExpression !== null) 
		p += 2;
	var endExpression = abego.parseNamedParameter("end", parameter, p);
	if (endExpression !== null) 
		p += 2;
	var noneExpression = abego.parseNamedParameter("none", parameter, p);
	if (noneExpression !== null) 
		p += 2;

	// Parse the "toFile" option
	var filename = null;
	var lineSeparator = undefined;
	if ((p < parameter.length) && parameter[p] == "toFile") {
		p++;
		if (p >= parameter.length) {
			this.handleError(place, "Filename expected behind 'toFile' of 'write' action.");
			return;
		}
		
		filename = config.macros.fo