#
# AUTHOR <EMAIL@ADDRESS>, YEAR.
#
msgid ""
msgstr ""
"Project-Id-Version: 0\n"
"POT-Creation-Date: 2012-10-17T05:19:03\n"
"PO-Revision-Date: 2010-07-14 16:33+0100\n"
"Last-Translator: RaSca <rasca@miamammausalinux.org>\n"
"Language-Team: None\n"
"Language: \n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
#. Tag: title
#, no-c-format
msgid "Configuration Recap"
msgstr "Riepilogo delle configurazioni"
#. Tag: title
#, no-c-format
msgid "Final Cluster Configuration"
msgstr "Configurazione finale del cluster"
#. Tag: programlisting
#, no-c-format
msgid ""
"# pcs resource\n"
" Master/Slave Set: WebDataClone [WebData]\n"
" Masters: [ pcmk-2 pcmk-1 ]\n"
" Clone Set: dlm-clone [dlm]\n"
" Started: [ pcmk-2 pcmk-1 ]\n"
" Clone Set: ClusterIP-clone [ClusterIP] (unique)\n"
" ClusterIP:0 (ocf::heartbeat:IPaddr2) Started\n"
" ClusterIP:1 (ocf::heartbeat:IPaddr2) Started\n"
" Clone Set: WebFS-clone [WebFS]\n"
" Started: [ pcmk-1 pcmk-2 ]\n"
" Clone Set: WebSite-clone [WebSite]\n"
" Started: [ pcmk-1 pcmk-2 ]\n"
"# pcs resource rsc defaults\n"
"resource-stickiness: 100\n"
"# pcs resource op defaults\n"
"timeout: 240s\n"
"# pcs stonith\n"
" impi-fencing (stonith:fence_ipmilan) Started\n"
"# pcs property\n"
"dc-version: 1.1.8-1.el7-60a19ed12fdb4d5c6a6b6767f52e5391e447fec0\n"
"cluster-infrastructure: corosync\n"
"no-quorum-policy: ignore\n"
"stonith-enabled: true\n"
"# pcs constraint\n"
"Location Constraints:\n"
"Ordering Constraints:\n"
" ClusterIP-clone then WebSite-clone\n"
" WebDataClone then WebSite-clone\n"
" WebFS-clone then WebSite-clone\n"
"Colocation Constraints:\n"
" WebSite-clone with ClusterIP-clone\n"
" WebFS-clone with WebDataClone (with-rsc-role:Master)\n"
" WebSite-clone with WebFS-clone\n"
"#\n"
"# pcs status\n"
"\n"
"Last updated: Fri Sep 14 13:45:34 2012\n"
"Last change: Fri Sep 14 13:43:13 2012 via cibadmin on pcmk-1\n"
"Stack: corosync\n"
"Current DC: pcmk-1 (1) - partition with quorum\n"
"Version: 1.1.8-1.el7-60a19ed12fdb4d5c6a6b6767f52e5391e447fec0\n"
"2 Nodes configured, unknown expected votes\n"
"11 Resources configured.\n"
"\n"
"Online: [ pcmk-1 pcmk-2 ]\n"
"\n"
"Full list of resources:\n"
"\n"
" Master/Slave Set: WebDataClone [WebData]\n"
" Masters: [ pcmk-2 pcmk-1 ]\n"
" Clone Set: dlm-clone [dlm]\n"
" Started: [ pcmk-1 pcmk-2 ]\n"
" Clone Set: ClusterIP-clone [ClusterIP] (unique)\n"
" ClusterIP:0 (ocf::heartbeat:IPaddr2): Started pcmk-1\n"
" ClusterIP:1 (ocf::heartbeat:IPaddr2): Started pcmk-2\n"
" Clone Set: WebFS-clone [WebFS]\n"
" Started: [ pcmk-1 pcmk-2 ]\n"
" Clone Set: WebSite-clone [WebSite]\n"
" Started: [ pcmk-1 pcmk-2 ]\n"
" impi-fencing (stonith:fence_ipmilan): Started"
msgstr ""
#. Tag: para
#, no-c-format
msgid "In xml it should look similar to this."
msgstr ""
#. Tag: programlisting
#, no-c-format
msgid ""
"<cib admin_epoch=\"0\" cib-last-written=\"Fri Sep 14 13:43:13 2012\" crm_feature_set=\"3.0.6\" dc-uuid=\"1\" epoch=\"47\" have-quorum=\"1\" num_updates=\"50\" update-client=\"cibadmin\" update-origin=\"pcmk-1\" validate-with=\"pacemaker-1.2\">\n"
" <configuration>\n"
" <crm_config>\n"
" <cluster_property_set id=\"cib-bootstrap-options\">\n"
" <nvpair id=\"cib-bootstrap-options-dc-version\" name=\"dc-version\" value=\"1.1.8-1.el7-60a19ed12fdb4d5c6a6b6767f52e5391e447fec0\"/>\n"
" <nvpair id=\"cib-bootstrap-options-cluster-infrastructure\" name=\"cluster-infrastructure\" value=\"corosync\"/>\n"
" <nvpair id=\"cib-bootstrap-options-no-quorum-policy\" name=\"no-quorum-policy\" value=\"ignore\"/>\n"
" <nvpair id=\"cib-bootstrap-options-stonith-enabled\" name=\"stonith-enabled\" value=\"true\"/>\n"
" </cluster_property_set>\n"
" </crm_config>\n"
" <nodes>\n"
" <node id=\"1\" type=\"normal\" uname=\"pcmk-1\"/>\n"
" <node id=\"2\" type=\"normal\" uname=\"pcmk-2\"/>\n"
" </nodes>\n"
" <resources>\n"
" <master id=\"WebDataClone\">\n"
" <primitive class=\"ocf\" id=\"WebData\" provider=\"linbit\" type=\"drbd\">\n"
" <instance_attributes id=\"WebData-instance_attributes\">\n"
" <nvpair id=\"WebData-instance_attributes-drbd_resource\" name=\"drbd_resource\" value=\"wwwdata\"/>\n"
" </instance_attributes>\n"
" <operations>\n"
" <op id=\"WebData-interval-60s\" interval=\"60s\" name=\"monitor\"/>\n"
" </operations>\n"
" </primitive>\n"
" <meta_attributes id=\"WebDataClone-meta_attributes\">\n"
" <nvpair id=\"WebDataClone-meta_attributes-master-node-max\" name=\"master-node-max\" value=\"1\"/>\n"
" <nvpair id=\"WebDataClone-meta_attributes-clone-max\" name=\"clone-max\" value=\"2\"/>\n"
" <nvpair id=\"WebDataClone-meta_attributes-clone-node-max\" name=\"clone-node-max\" value=\"1\"/>\n"
" <nvpair id=\"WebDataClone-meta_attributes-notify\" name=\"notify\" value=\"true\"/>\n"
" <nvpair id=\"WebDataClone-meta_attributes-master-max\" name=\"master-max\" value=\"2\"/>\n"
" </meta_attributes>\n"
" </master>\n"
" <clone id=\"dlm-clone\">\n"
" <primitive class=\"ocf\" id=\"dlm\" provider=\"pacemaker\" type=\"controld\">\n"
" <instance_attributes id=\"dlm-instance_attributes\"/>\n"
" <operations>\n"
" <op id=\"dlm-interval-60s\" interval=\"60s\" name=\"monitor\"/>\n"
" </operations>\n"
" </primitive>\n"
" <meta_attributes id=\"dlm-clone-meta\">\n"
" <nvpair id=\"dlm-clone-max\" name=\"clone-max\" value=\"2\"/>\n"
" <nvpair id=\"dlm-clone-node-max\" name=\"clone-node-max\" value=\"1\"/>\n"
" </meta_attributes>\n"
" </clone>\n"
" <clone id=\"ClusterIP-clone\">\n"
" <primitive class=\"ocf\" id=\"ClusterIP\" provider=\"heartbeat\" type=\"IPaddr2\">\n"
" <instance_attributes id=\"ClusterIP-instance_attributes\">\n"
" <nvpair id=\"ClusterIP-instance_attributes-ip\" name=\"ip\" value=\"192.168.0.120\"/>\n"
" <nvpair id=\"ClusterIP-instance_attributes-cidr_netmask\" name=\"cidr_netmask\" value=\"32\"/>\n"
" <nvpair id=\"ClusterIP-instance_attributes-clusterip_hash\" name=\"clusterip_hash\" value=\"sourceip\"/>\n"
" </instance_attributes>\n"
" <operations>\n"
" <op id=\"ClusterIP-interval-30s\" interval=\"30s\" name=\"monitor\"/>\n"
" </operations>\n"
" </primitive>\n"
" <meta_attributes id=\"ClusterIP-clone-meta\">\n"
" <nvpair id=\"ClusterIP-globally-unique\" name=\"globally-unique\" value=\"true\"/>\n"
" <nvpair id=\"ClusterIP-clone-max\" name=\"clone-max\" value=\"2\"/>\n"
" <nvpair id=\"ClusterIP-clone-node-max\" name=\"clone-node-max\" value=\"2\"/>\n"
" </meta_attributes>\n"
" </clone>\n"
" <clone id=\"WebFS-clone\">\n"
" <primitive class=\"ocf\" id=\"WebFS\" provider=\"heartbeat\" type=\"Filesystem\">\n"
" <instance_attributes id=\"WebFS-instance_attributes\">\n"
" <nvpair id=\"WebFS-instance_attributes-device\" name=\"device\" value=\"/dev/drbd/by-res/wwwdata\"/>\n"
" <nvpair id=\"WebFS-instance_attributes-directory\" name=\"directory\" value=\"/var/www/html\"/>\n"
" <nvpair id=\"WebFS-instance_attributes-fstype\" name=\"fstype\" value=\"gfs2\"/>\n"
" </instance_attributes>\n"
" <meta_attributes id=\"WebFS-meta_attributes\"/>\n"
" </primitive>\n"
" <meta_attributes id=\"WebFS-clone-meta\"/>\n"
" </clone>\n"
" <clone id=\"WebSite-clone\">\n"
" <primitive class=\"ocf\" id=\"WebSite\" provider=\"heartbeat\" type=\"apache\">\n"
" <instance_attributes id=\"WebSite-instance_attributes\">\n"
" <nvpair id=\"WebSite-instance_attributes-configfile\" name=\"configfile\" value=\"/etc/httpd/conf/httpd.conf\"/>\n"
" <nvpair id=\"WebSite-instance_attributes-statusurl\" name=\"statusurl\" value=\"http://localhost/server-status\"/>\n"
" </instance_attributes>\n"
" <operations>\n"
" <op id=\"WebSite-interval-1min\" interval=\"1min\" name=\"monitor\"/>\n"
" </operations>\n"
" </primitive>\n"
" <meta_attributes id=\"WebSite-clone-meta\"/>\n"
" </clone>\n"
" <primitive class=\"stonith\" id=\"impi-fencing\" type=\"fence_ipmilan\">\n"
" <instance_attributes id=\"impi-fencing-instance_attributes\">\n"
" <nvpair id=\"impi-fencing-instance_attributes-pcmk_host_list\" name=\"pcmk_host_list\" value=\"pcmk-1 pcmk-2\"/>\n"
" <nvpair id=\"impi-fencing-instance_attributes-ipaddr\" name=\"ipaddr\" value=\"10.0.0.1\"/>\n"
" <nvpair id=\"impi-fencing-instance_attributes-login\" name=\"login\" value=\"testuser\"/>\n"
" <nvpair id=\"impi-fencing-instance_attributes-passwd\" name=\"passwd\" value=\"acd123\"/>\n"
" </instance_attributes>\n"
" <operations>\n"
" <op id=\"impi-fencing-interval-60s\" interval=\"60s\" name=\"monitor\"/>\n"
" </operations>\n"
" </primitive>\n"
" </resources>\n"
" <constraints>\n"
" <rsc_colocation id=\"colocation-WebSite-ClusterIP-INFINITY\" rsc=\"WebSite-clone\" score=\"INFINITY\" with-rsc=\"ClusterIP-clone\"/>\n"
" <rsc_order first=\"ClusterIP-clone\" first-action=\"start\" id=\"order-ClusterIP-WebSite-mandatory\" then=\"WebSite-clone\" then-action=\"start\"/>\n"
" <rsc_colocation id=\"colocation-WebFS-WebDataClone-INFINITY\" rsc=\"WebFS-clone\" score=\"INFINITY\" with-rsc=\"WebDataClone\" with-rsc-role=\"Master\"/>\n"
" <rsc_colocation id=\"colocation-WebSite-WebFS-INFINITY\" rsc=\"WebSite-clone\" score=\"INFINITY\" with-rsc=\"WebFS-clone\"/>\n"
" <rsc_order first=\"WebFS-clone\" id=\"order-WebFS-WebSite-mandatory\" then=\"WebSite-clone\"/>\n"
" <rsc_order first=\"WebDataClone\" first-action=\"promote\" id=\"order-WebDataClone-WebFS-mandatory\" then=\"WebFS-clone\" then-action=\"start\"/>\n"
" </constraints>\n"
" <rsc_defaults>\n"
" <meta_attributes id=\"rsc_defaults-options\">\n"
" <nvpair id=\"rsc_defaults-options-resource-stickiness\" name=\"resource-stickiness\" value=\"100\"/>\n"
" </meta_attributes>\n"
" </rsc_defaults>\n"
" <op_defaults>\n"
" <meta_attributes id=\"op_defaults-options\">\n"
" <nvpair id=\"op_defaults-options-timeout\" name=\"timeout\" value=\"240s\"/>\n"
" </meta_attributes>\n"
" </op_defaults>\n"
" </configuration>\n"
"</cib>"
msgstr ""
#. Tag: title
#, no-c-format
msgid "Node List"
msgstr "Lista nodi"
#. Tag: para
#, no-c-format
msgid "The list of cluster nodes is automatically populated by the cluster."
msgstr "La lista dei nodi è popolata automaticamente dal cluster."
#. Tag: literallayout
#, no-c-format
msgid ""
"Pacemaker Nodes:\n"
" Online: [ pcmk-1 pcmk-2 ]"
msgstr ""
#. Tag: title
#, no-c-format
msgid "Cluster Options"
msgstr "Opzioni del cluster"
#. Tag: para
#, no-c-format
msgid "This is where the cluster automatically stores some information about the cluster"
msgstr "Qui è dove il cluster registra automaticamente alcune informazioni in merito al cluster"
#. Tag: para
#, no-c-format
msgid "dc-version - the version (including upstream source-code hash) of Pacemaker used on the DC"
msgstr "dc-version - la versione (incluso l'hash del codice sorgente originale) di Pacemaker usata nel DC"
#. Tag: para
#, no-c-format
msgid "cluster-infrastructure - the cluster infrastructure being used (heartbeat or openais)"
msgstr "cluster-infrastructure - l'infrastruttura cluster utilizzata (heartbeat o openais)"
#. Tag: para
#, no-c-format
msgid "expected-quorum-votes - the maximum number of nodes expected to be part of the cluster"
msgstr "expected-quorum-votes - il numero massimo di nodi che ci si aspetta facciano parte del cluster"
#. Tag: para
#, no-c-format
msgid "and where the admin can set options that control the way the cluster operates"
msgstr "e dove l'amministratore può assegnare valori alle opzioni che controllano il modo in cui il cluster opera"
#. Tag: para
#, no-c-format
msgid "stonith-enabled=true - Make use of STONITH"
msgstr "stonith-enabled=true - Fai uso di STONITH"
#. Tag: para
#, no-c-format
msgid "no-quorum-policy=ignore - Ignore loss of quorum and continue to host resources."
msgstr "no-quorum-policy=ignore - Ignora la perdita di quorum e continua ad ospitare le risorse."
#. Tag: programlisting
#, no-c-format
msgid ""
"# pcs property\n"
"dc-version: 1.1.8-1.el7-60a19ed12fdb4d5c6a6b6767f52e5391e447fec0\n"
"cluster-infrastructure: corosync\n"
"no-quorum-policy: ignore\n"
"stonith-enabled: true"
msgstr ""
#. Tag: title
#, no-c-format
msgid "Resources"
msgstr "Risorse"
#. Tag: title
#, no-c-format
msgid "Default Options"
msgstr "Opzioni di default"
#. Tag: para
#, no-c-format
msgid "Here we configure cluster options that apply to every resource."
msgstr "Qui vengono configurate le opzioni del cluster che vanno applicate a tutte le risorse."
#. Tag: para
#, no-c-format
msgid "resource-stickiness - Specify the aversion to moving resources to other machines"
msgstr "resource-stickiness - Specifica la riluttanza a spostare le risorse su altre macchine"
#. Tag: programlisting
#, no-c-format
msgid ""
"# pcs resource rsc defaults\n"
"resource-stickiness: 100"
msgstr ""
#. Tag: title
#, no-c-format
msgid "Fencing"
msgstr "Fencing"
#. Tag: programlisting
#, no-c-format
msgid ""
"# pcs stonith show\n"
" impi-fencing (stonith:fence_ipmilan) Started\n"
"# pcs stonith show impi-fencing\n"
"Resource: impi-fencing\n"
" pcmk_host_list: pcmk-1 pcmk-2\n"
" ipaddr: 10.0.0.1\n"
" login: testuser\n"
" passwd: acd123"
msgstr ""
#. Tag: title
#, no-c-format
msgid "Service Address"
msgstr "Indirizzo del servizio"
#. Tag: para
#, fuzzy, no-c-format
msgid "Users of the services provided by the cluster require an unchanging address with which to access it. Additionally, we cloned the address so it will be active on both nodes. An iptables rule (created as part of the resource agent) is used to ensure that each request only gets processed by one of the two clone instances. The additional meta options tell the cluster that we want two instances of the clone (one \"request bucket\" for each node) and that if one node fails, then the remaining node should hold both."
msgstr "Gli utenti dei servizi forniti dal cluster richiedono un indirizzo invariato con cui accedervi. Inoltre, l'indirizzo viene clonato così da essere attivo su entrambi i nodi. Una regola iptables (creata come parte del resource agent) viene utilizzata per assicurarsi che ogni richiesta venga processata unicamente da una delle due istanze clone. Le opzioni meta aggiuntive indicano al cluster che si necessita di due istanze del clone (una \"request bucket\" per ogni nodo) e che se un nodo muore, allora il nodo rimanente deve erogarle entrambe."
#. Tag: programlisting
#, no-c-format
msgid ""
"# pcs resource show ClusterIP-clone\n"
"Resource: ClusterIP-clone\n"
" ip: 192.168.0.120\n"
" cidr_netmask: 32\n"
" clusterip_hash: sourceip\n"
" globally-unique: true\n"
" clone-max: 2\n"
" clone-node-max: 2\n"
" op monitor interval=30s"
msgstr ""
#. Tag: para
#, no-c-format
msgid "TODO: The RA should check for globally-unique=true when cloned"
msgstr "TODO: Il RA quando clonato dovrebbe controllare l'opzione globally-unique=true"
#. Tag: title
#, no-c-format
msgid "DRBD - Shared Storage"
msgstr "DRBD - Storage condiviso"
#. Tag: para
#, no-c-format
msgid "Here we define the DRBD service and specify which DRBD resource (from drbd.conf) it should manage. We make it a master/slave resource and, in order to have an active/active setup, allow both instances to be promoted by specifying master-max=2. We also set the notify option so that the cluster will tell DRBD agent when it’s peer changes state."
msgstr "Qui viene definito il servizio DRBD e specificata quale risorsa DRBD (da drbd.conf) questi debba controllare. La risorsa viene definita come master/slave e, per rispettare il setup active/active, entrambe le istanze vengono abilitate ad essere promosse specificando master-max=2. Viene valorizzata inoltre l'opzione notify, così che il cluster comunicherà all'agent DRBD quando il suo peer cambierà stato."
#. Tag: programlisting
#, no-c-format
msgid ""
"# pcs resource show WebDataClone\n"
"Resource: WebDataClone\n"
" drbd_resource: wwwdata\n"
" master-node-max: 1\n"
" clone-max: 2\n"
" clone-node-max: 1\n"
" notify: true\n"
" master-max: 2\n"
" op monitor interval=60s\n"
"# pcs constraint ref WebDataClone\n"
"Resource: WebDataClone\n"
" colocation-WebFS-WebDataClone-INFINITY\n"
" order-WebDataClone-WebFS-mandatory"
msgstr ""
#. Tag: title
#, no-c-format
msgid "Cluster Filesystem"
msgstr "Cluster Filesystem"
#. Tag: para
#, no-c-format
msgid "The cluster filesystem ensures that files are read and written correctly. We need to specify the block device (provided by DRBD), where we want it mounted and that we are using GFS2. Again it is a clone because it is intended to be active on both nodes. The additional constraints ensure that it can only be started on nodes with active gfs-control and drbd instances."
msgstr "Il Cluster Filesystem si assicura che i file vengano letti e scritti nella maniera corretta. È necessario specificare il block device (fornito da DRBD), dove si vuole effettuare l'operazione di mount e che viene utilizzato GFS2. Di nuovo questo è un clone, perché è inteso essere attivo su entrambi i nodi. Le constraint aggiuntive assicurano che la risorsa possa essere avviata unicamente su nodi con istanze gfs-control e drbd attive."
#. Tag: programlisting
#, no-c-format
msgid ""
"# pcs resource show WebFS-clone\n"
"Resource: WebFS-clone\n"
" device: /dev/drbd/by-res/wwwdata\n"
" directory: /var/www/html\n"
" fstype: gfs2\n"
"# pcs constraint ref WebFS-clone\n"
"Resource: WebFS-clone\n"
" colocation-WebFS-WebDataClone-INFINITY\n"
" colocation-WebSite-WebFS-INFINITY\n"
" order-WebFS-WebSite-mandatory\n"
" order-WebDataClone-WebFS-mandatory"
msgstr ""
#. Tag: title
#, no-c-format
msgid "Apache"
msgstr "Apache"
#. Tag: para
#, no-c-format
msgid "Lastly we have the actual service, Apache. We need only tell the cluster where to find it’s main configuration file and restrict it to running on nodes that have the required filesystem mounted and the IP address active."
msgstr "Infine viene definito il servizio Apache. E' necessario solamente specificare al cluster dove trovare il file di configurazione principale e costringere questo ad essere eseguito su nodi con il filesystem richiesto montato e l'indirizzo IP attivo."
#. Tag: programlisting
#, no-c-format
msgid ""
"# pcs resource show WebSite-clone\n"
"Resource: WebSite-clone\n"
" configfile: /etc/httpd/conf/httpd.conf\n"
" statusurl: http://localhost/server-status\n"
" master-max: 2\n"
" op monitor interval=1min\n"
"# pcs constraint ref WebSite-clone\n"
"Resource: WebSite-clone\n"
" colocation-WebSite-ClusterIP-INFINITY\n"
" colocation-WebSite-WebFS-INFINITY\n"
" order-ClusterIP-WebSite-mandatory\n"
" order-WebFS-WebSite-mandatory"
msgstr ""
#~ msgid "TODO: Add text here"
#~ msgstr "TODO: Aggiungi il testo qui"
#~ msgid "Distributed lock manager"
#~ msgstr "Distributed lock manager"
#~ msgid "Cluster filesystems like GFS2 require a lock manager. This service starts the daemon that provides user-space applications (such as the GFS2 daemon) with access to the in-kernel lock manager. Since we need it to be available on all nodes in the cluster, we have it cloned."
#~ msgstr "I filesystem cluster come GFS2 richiedono un lock manager. Questo servizio avvia il demone che fornisce alle applicazioni user-space (come il demone GFS2) l'accesso al lock manager interno al kernel. Dato che si necessita di averlo attivo su tutti i nodi del cluster, questo viene clonato."
#~ msgid "TODO: Confirm <literal>interleave</literal> is no longer needed"
#~ msgstr "TODO: Confermare che il parametro <literal>interleave</literal> non è più necessario"
#~ msgid "GFS control daemon"
#~ msgstr "Demone di controllo di GFS"
#~ msgid "GFS2 also needs a user-space/kernel bridge that runs on every node. So here we have another clone, however this time we must also specify that it can only run on machines that are also running the DLM (colocation constraint) and that it can only be started after the DLM is running (order constraint). Additionally, the gfs-control clone should only care about the DLM instances it is paired with, so we need to set the interleave option."
#~ msgstr "GFS2 inoltre necessita di un ponte user-space/kernel eseguito su ogni nodo. Così qui è presente un altro clone, sebbene questa volta va specificato che può girare solamente su macchine che stanno eseguendo anche il DLM (colocation constraint) e che questo può solamente essere avviato dopo che il DLM viene a sua volta avviato (order constraint). Inoltre, il clone gfs-control deve preoccuparsi unicamente delle istanze DLM a cui è associato, così è necessario valorizzare l'opzione interleave."